WebXR: Beyond WebGL

yomotsu
November 25, 2018

Transcript

  1. WebXR Device API
     • The API for both AR and VR
       (the WebVR API will be replaced by the WebXR Device API)
     • Closely related to Khronos’ OpenXR
  2. As of Nov. 2018:
     • Currently available only in Chrome Canary, behind flags
     • Works only over https or on localhost
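     A minimal sketch of detecting the (2018-era) API before using it, matching
     the navigator.xr.requestDevice() call used on the following slides:

     if ( 'xr' in navigator ) {

       navigator.xr.requestDevice()
         .then( ( device ) => { /* an XR device is available */ } )
         .catch( () => { /* no XR device on this browser or machine */ } );

     } else {

       // the flag is off, or the browser doesn't support the WebXR Device API

     }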
  3. XRSession: the WebGL context to be used as a source for XR imagery

     const glCanvas = document.createElement( 'canvas' );
     const gl = glCanvas.getContext( 'webgl', { xrCompatible: true } );
     xrSession.baseLayer = new XRWebGLLayer( xrSession, gl );
  4. const width = window.innerWidth;
     const height = window.innerHeight;

     navigator.xr.requestDevice().then( ( device ) => {

       const outputCanvas = document.getElementById( 'xrCanvas' );
       outputCanvas.width = width;
       outputCanvas.height = height;
       const xrContext = outputCanvas.getContext( 'xrpresent' );

       // the session request must be made inside a user action, such as a click
       window.addEventListener( 'click', onEnterAR );

       async function onEnterAR() {

         const xrSession = await device.requestSession( {
           outputContext: xrContext,
           environmentIntegration: true,
         } );

         const renderer = new THREE.WebGLRenderer();
         renderer.autoClear = false;
         renderer.setSize( width, height );
  7. window.addEventListener( 'click', onEnterAR );

     async function onEnterAR() {

       const xrSession = await device.requestSession( {
         outputContext: xrContext,
         environmentIntegration: true,
       } );

       const renderer = new THREE.WebGLRenderer();
       renderer.autoClear = false;
       renderer.setSize( width, height );

       // bind the gl context to the XR session
       const gl = renderer.getContext();
       gl.setCompatibleXRDevice( xrSession.device );
       xrSession.baseLayer = new XRWebGLLayer( xrSession, gl );

       const scene = new THREE.Scene();
       const camera = new THREE.PerspectiveCamera();
       camera.matrixAutoUpdate = false;

       const box = new THREE.Mesh(
         new THREE.BoxBufferGeometry( .2, .2, .2 ),
         new THREE.MeshNormalMaterial()
  10. xrSession.baseLayer = new XRWebGLLayer( xrSession, gl );

      const scene = new THREE.Scene();
      const camera = new THREE.PerspectiveCamera();
      camera.matrixAutoUpdate = false;

      const box = new THREE.Mesh(
        new THREE.BoxBufferGeometry( .2, .2, .2 ),
        new THREE.MeshNormalMaterial()
      );
      scene.add( box );

      const frameOfRef = await xrSession.requestFrameOfReference( 'eye-level' );
      xrSession.requestAnimationFrame( onDrawFrame );

      function onDrawFrame( timestamp, xrFrame ) {

        const session = xrFrame.session; // xrSession === xrFrame.session
        const pose = xrFrame.getDevicePose( frameOfRef );
        session.requestAnimationFrame( onDrawFrame );
        gl.bindFramebuffer( gl.FRAMEBUFFER, session.baseLayer.framebuffer );
  11. scene.add( box );

      const frameOfRef = await xrSession.requestFrameOfReference( 'eye-level' );
      xrSession.requestAnimationFrame( onDrawFrame );

      function onDrawFrame( timestamp, xrFrame ) {

        const session = xrFrame.session; // xrSession === xrFrame.session
        const pose = xrFrame.getDevicePose( frameOfRef );
        session.requestAnimationFrame( onDrawFrame );
        gl.bindFramebuffer( gl.FRAMEBUFFER, session.baseLayer.framebuffer );

        if ( ! pose ) return;

        // if the session is for both right and left eyes, the length of views will be 2.
        // if not, the length is 1.
        xrFrame.views.forEach( ( view ) => {

          const viewport = session.baseLayer.getViewport( view );
          renderer.setSize( viewport.width, viewport.height );
  13. function onDrawFrame( timestamp, xrFrame ) {

        const session = xrFrame.session; // xrSession === xrFrame.session
        const pose = xrFrame.getDevicePose( frameOfRef );
        session.requestAnimationFrame( onDrawFrame );
        gl.bindFramebuffer( gl.FRAMEBUFFER, session.baseLayer.framebuffer );

        if ( ! pose ) return;

        // if the session is for both right and left eyes, the length of views will be 2.
        // if not, the length is 1.
        xrFrame.views.forEach( ( view ) => {

          const viewport = session.baseLayer.getViewport( view );
          renderer.setSize( viewport.width, viewport.height );

          camera.projectionMatrix.fromArray( view.projectionMatrix );
          const viewMatrix = new THREE.Matrix4().fromArray( pose.getViewMatrix( view ) );
          camera.matrix.getInverse( viewMatrix );
          camera.updateMatrixWorld( true );

          renderer.clearDepth();
          renderer.render( scene, camera );
  15. if ( ! pose ) return;

      // if the session is for both right and left eyes, the length of views will be 2.
      // if not, the length is 1.
      xrFrame.views.forEach( ( view ) => {

        const viewport = session.baseLayer.getViewport( view );
        renderer.setSize( viewport.width, viewport.height );

        camera.projectionMatrix.fromArray( view.projectionMatrix );
        const viewMatrix = new THREE.Matrix4().fromArray( pose.getViewMatrix( view ) );
        camera.matrix.getInverse( viewMatrix );
        camera.updateMatrixWorld( true );

        renderer.clearDepth();
        renderer.render( scene, camera );

      } );
      }
      }
      } );
  16. The same as the previous one, but started from a button:

      const width = window.innerWidth;
      const height = window.innerHeight;
      const startButton = document.getElementById( 'startButton' );

      navigator.xr.requestDevice().then( ( device ) => {

        const outputCanvas = document.getElementById( 'xrCanvas' );
        outputCanvas.width = width;
        outputCanvas.height = height;
        const xrContext = outputCanvas.getContext( 'xrpresent' );

        // the session request must be made inside a user action, such as a click
        startButton.addEventListener( 'click', onEnterAR );

        async function onEnterAR() {

          startButton.style.display = 'none';
          const xrSession = await device.requestSession( {
  17. Add a click action:

        renderer.render( scene, camera );

      } );
      }

      window.addEventListener( 'click', onClick );

      // use a Raycaster to make the ray origin and direction
      const raycaster = new THREE.Raycaster();

      // onClick must be async, since the hit test is awaited
      async function onClick() {

        const x = 0;
        const y = 0;
        raycaster.setFromCamera( { x, y }, camera );

        const origin = new Float32Array( raycaster.ray.origin.toArray() );
        const direction = new Float32Array( raycaster.ray.direction.toArray() );
        const hits = await xrSession.requestHitTest( origin, direction, frameOfRef );

        if ( hits.length ) {
  19. window.addEventListener( 'click', onClick );

      // use a Raycaster to make the ray origin and direction
      const raycaster = new THREE.Raycaster();

      // onClick must be async, since the hit test is awaited
      async function onClick() {

        const x = 0;
        const y = 0;
        raycaster.setFromCamera( { x, y }, camera );

        const origin = new Float32Array( raycaster.ray.origin.toArray() );
        const direction = new Float32Array( raycaster.ray.direction.toArray() );
        const hits = await xrSession.requestHitTest( origin, direction, frameOfRef );

        if ( hits.length ) {

          const hit = hits[ 0 ];
          const hitMatrix = new THREE.Matrix4().fromArray( hit.hitMatrix );
          const box = new THREE.Mesh(
  20. const raycaster = new THREE.Raycaster();

      // onClick must be async, since the hit test is awaited
      async function onClick() {

        const x = 0;
        const y = 0;
        raycaster.setFromCamera( { x, y }, camera );

        const origin = new Float32Array( raycaster.ray.origin.toArray() );
        const direction = new Float32Array( raycaster.ray.direction.toArray() );
        const hits = await xrSession.requestHitTest( origin, direction, frameOfRef );

        if ( hits.length ) {

          const hit = hits[ 0 ];
          const hitMatrix = new THREE.Matrix4().fromArray( hit.hitMatrix );
          const box = new THREE.Mesh(
            new THREE.BoxBufferGeometry( .2, .2, .2 ),
            new THREE.MeshNormalMaterial()
          );
  22. raycaster.setFromCamera( { x, y }, camera );

      const origin = new Float32Array( raycaster.ray.origin.toArray() );
      const direction = new Float32Array( raycaster.ray.direction.toArray() );
      const hits = await xrSession.requestHitTest( origin, direction, frameOfRef );

      if ( hits.length ) {

        const hit = hits[ 0 ];
        const hitMatrix = new THREE.Matrix4().fromArray( hit.hitMatrix );
        const box = new THREE.Mesh(
          new THREE.BoxBufferGeometry( .2, .2, .2 ),
          new THREE.MeshNormalMaterial()
        );
        box.position.setFromMatrixPosition( hitMatrix );
        scene.add( box );

      }
      }
      }
  24. Apple’s proprietary AR Quick Look:
      • Shipped with the release of iOS 12
      • Works exclusively in Safari
        (doesn’t work even in Chrome for iOS)
      • Uses a special HTML syntax
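      A minimal sketch of that HTML syntax, with placeholder file names: an
      anchor with rel="ar" wrapping a preview image and linking to the USDZ file.

      <!-- tapping the preview opens the model in AR Quick Look (iOS 12 Safari) -->
      <a rel="ar" href="./models/my-model.usdz">
        <img src="./models/my-model-preview.jpg" alt="Preview of my model">
      </a>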
  25. USDZ:
      • Stands for “Universal Scene Description”, archived with Zip
      • The 3D model format for AR Quick Look
      • Created by Apple and Pixar
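      Because a .usdz file is a Zip archive, its contents can be listed with
      ordinary tools (the file name here is a placeholder):

      $ unzip -l my-model.usdz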
  26. How to prepare USDZ:
      • With “usdz_converter”, an Xcode command-line tool
        (macOS only)
      • With Vectary (a web service),
        though details such as size cannot be configured
  27. Limitations:
      • Up to 10M polygons
      • Up to 10 seconds of animation
      • Texture sizes up to 2048×2048
  28. USDZ Converter (Terminal.app):

      $ xcrun usdz_converter ./my-model.obj my-model.usdz \
          -color_map albedo.jpg \
          -metallic_map metallic.jpg \
          -roughness_map roughness.jpg \
          -normal_map normal.jpg \
          -ao_map ao.jpg \
          -emissive_map emissive.jpg

      ./my-model.obj is the file input and my-model.usdz is the output name;
      each flag such as -color_map is an option name followed by its option value.
  31. • Apple’s proprietary tech
        (hopefully a temporary spec until the WebXR Device API arrives)
      • USDZ comes with some limitations
      • Models just pop up and display in AR
        (cannot be utilized for games and other interactive uses)
  32. What is glTF:
      • Stands for “GL Transmission Format”
      • An open-standard 3D model format
      • The “JPEG of 3D”
      • Maintained by Khronos
  33. What is glTF:
      • JSON as the container, with binary payloads,
        or a single packed binary file called glb
      • Animation is supported
      • Extensible, just like the WebGL spec
        (like Adobe Fireworks’ PNG)
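      An abbreviated sketch of the top-level JSON structure of a glTF 2.0
      container (the buffer URI and byte length are placeholders, and a real
      asset would also need the accessors and bufferViews filled in and wired
      to the mesh):

      {
        "asset":       { "version": "2.0" },
        "scene":       0,
        "scenes":      [ { "nodes": [ 0 ] } ],
        "nodes":       [ { "mesh": 0 } ],
        "meshes":      [ { "primitives": [ { "attributes": { "POSITION": 0 } } ] } ],
        "accessors":   [ ],
        "bufferViews": [ ],
        "buffers":     [ { "uri": "model.bin", "byteLength": 1024 } ]
      }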
  34. glTF at present:
      • Supported by many 3D modeling tools
      • Several WebGL libraries support glTF loading
        (such as three.js, Babylon.js, Cesium)
      • Microsoft Paint 3D, Office, and others use glb as their 3D model format
      • Adobe Animate has a glTF exporter
      • Facebook’s 3D posts use glb
      • VRM: a glTF-extended format for humanoid avatars
        (for virtual YouTubers, VRChat, and others)
  35. const width = window.innerWidth;
      const height = window.innerHeight;

      const scene = new THREE.Scene();
      const camera = new THREE.PerspectiveCamera( 45, width / height, 0.001, 100 );
      camera.position.set( 0, 0, 0.5 );

      const renderer = new THREE.WebGLRenderer();
      renderer.setSize( width, height );
      renderer.gammaInput = true;
      renderer.gammaOutput = true;
      document.body.appendChild( renderer.domElement );

      scene.add( new THREE.HemisphereLight( 0xffffff, 0x332222 ) );
  36. document.body.appendChild( renderer.domElement );

      scene.add( new THREE.HemisphereLight( 0xffffff, 0x332222 ) );

      const loader = new THREE.GLTFLoader();
      loader.load( './models/barger/barger.gltf', function ( gltf ) {

        scene.add( gltf.scene );

      } );

      ( function anim () {

        requestAnimationFrame( anim );
  37. scene.add( gltf.scene );

      } );

      ( function anim () {

        requestAnimationFrame( anim );
        renderer.render( scene, camera );

      } )();
  38. • Gaze tracking:
        what a user types on a virtual keyboard could be detected
        from gaze direction in a VR environment
      • Trusted environment:
        motion sickness caused by low FPS and pose-tracking errors
      • Fingerprinting:
        a user’s room shape, or even face shape, could be identified
        from depth data
  39. WebXR Device API:
      • A Web API (in development)
      • For both VR and AR
      • Works right in web browsers:
        no add-ons or installation required
  40. glTF:
      • The standard
      • A 3D model format, in JSON or binary
      • Can be seen in many places
      • Loaders are available in JavaScript
  41. The Web will be connected to the real world.

      Source: https://www.netflix.com/jp/title/80182418
      © Shirow Masamune, Production I.G / Kodansha, Ghost in the Shell Production Committee