I have some code that generates two WebGL triangles, one green and one blue, and puts them on a mapbox-gl-js map. The four coordinates are those of four European cities. Here is the code I have:
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Add a custom style layer</title>
<meta name="viewport" content="initial-scale=1,maximum-scale=1,user-scalable=no">
<link href="https://api.mapbox.com/mapbox-gl-js/v2.12.0/mapbox-gl.css" rel="stylesheet">
<script src="https://api.mapbox.com/mapbox-gl-js/v2.12.0/mapbox-gl.js"></script>
<style>
body { margin: 0; padding: 0; }
#map { position: absolute; top: 0; bottom: 0; width: 100%; }
</style>
</head>
<body>
<div id="map"></div>
<script>
mapboxgl.accessToken = 'pk.eyJ1Ijoic3RlZXBhdHRpY3N0YWlycyIsImEiOiJjbDNvaGFod2EwbXluM2pwZTJiMDYzYjh5In0.J_HeH00ry0tbLmGmTy4z5w';
const map = new mapboxgl.Map({
container: 'map',
zoom: 3,
center: [7.5, 58],
// Choose from Mapbox's core styles, or make your own style with Mapbox Studio
style: 'mapbox://styles/mapbox/light-v11',
antialias: true, // create the gl context with MSAA antialiasing, so custom layers are antialiased
projection: 'mercator'
});
// define vertices of the triangle to be rendered in the custom style layer
const helsinki = mapboxgl.MercatorCoordinate.fromLngLat({
lng: 25.004,
lat: 60.239
});
const berlin = mapboxgl.MercatorCoordinate.fromLngLat({
lng: 13.403,
lat: 52.562
});
const kyiv = mapboxgl.MercatorCoordinate.fromLngLat({
lng: 30.498,
lat: 50.541
});
const moscow = mapboxgl.MercatorCoordinate.fromLngLat({
lng: 37.6173,
lat: 55.7558
});
const vertex_buffer = new Float32Array([
helsinki.x,
helsinki.y,
berlin.x,
berlin.y,
kyiv.x,
kyiv.y,
kyiv.x,
kyiv.y,
helsinki.x,
helsinki.y,
moscow.x,
moscow.y
])
const color_buffer = new Float32Array([
0.0, 1.0, 0.0, 1.0,
0.0, 1.0, 0.0, 1.0,
0.0, 1.0, 0.0, 1.0,
0.0, 0.0, 1.0, 1.0,
0.0, 0.0, 1.0, 1.0,
0.0, 0.0, 1.0, 1.0,
])
const highlightLayer = {
id: 'highlight',
type: 'custom',
// method called when the layer is added to the map
// https://docs.mapbox.com/mapbox-gl-js/api/#styleimageinterface#onadd
onAdd: function (map, gl) {
// create GLSL source for vertex shader
const vertexSource = `
uniform mat4 u_matrix;
attribute vec2 a_pos;
attribute vec4 color;
varying vec4 vColor;
void main() {
gl_Position = u_matrix * vec4(a_pos, 0.0, 1.0);
vColor = color;
}`;
// create GLSL source for fragment shader
const fragmentSource = `
precision lowp float;
varying vec4 vColor;
void main() {
gl_FragColor = vec4(vColor);
}`;
// create a vertex shader
const vertexShader = gl.createShader(gl.VERTEX_SHADER);
gl.shaderSource(vertexShader, vertexSource);
gl.compileShader(vertexShader);
// create a fragment shader
const fragmentShader = gl.createShader(gl.FRAGMENT_SHADER);
gl.shaderSource(fragmentShader, fragmentSource);
gl.compileShader(fragmentShader);
// link the two shaders into a WebGL program
this.program = gl.createProgram();
gl.attachShader(this.program, vertexShader);
gl.attachShader(this.program, fragmentShader);
gl.linkProgram(this.program);
this.aPos = gl.getAttribLocation(this.program, 'a_pos');
this.color = gl.getAttribLocation(this.program, 'color');
// create and initialize a WebGLBuffer to store vertex and color data
this.vertexBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, this.vertexBuffer);
gl.bufferData(
gl.ARRAY_BUFFER,
vertex_buffer,
gl.STATIC_DRAW
);
this.colorBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, this.colorBuffer);
gl.bufferData(
gl.ARRAY_BUFFER,
color_buffer,
gl.STATIC_DRAW
);
},
// method fired on each animation frame
// https://docs.mapbox.com/mapbox-gl-js/api/#map.event:render
render: function (gl, matrix) {
gl.useProgram(this.program);
gl.uniformMatrix4fv(
gl.getUniformLocation(this.program, 'u_matrix'),
false,
matrix
);
gl.bindBuffer(gl.ARRAY_BUFFER, this.vertexBuffer);
gl.enableVertexAttribArray(this.aPos);
gl.vertexAttribPointer(this.aPos, 2, gl.FLOAT, false, 0, 0);
gl.bindBuffer(gl.ARRAY_BUFFER, this.colorBuffer);
gl.enableVertexAttribArray(this.color);
gl.vertexAttribPointer(this.color, 4, gl.FLOAT, false, 0, 0);
gl.enable(gl.BLEND);
gl.blendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA);
gl.drawArrays(gl.TRIANGLES, 0, vertex_buffer.length / 2);
}
};
// add the custom style layer to the map
map.on('load', () => {
map.addLayer(highlightLayer, 'building');
});
</script>
</body>
</html>
This generates an image that looks like this:
However, I want to blur, or smooth, the polygons so they look like this:
(disregard the fact that the map is also blurred, I just blurred the whole image so you can understand what I want the polygons to look like)
Is there any way I can achieve this? Maybe I can implement a Gaussian blur, or use some type of shader? I'm rather new to WebGL, so I would really appreciate some help.
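To illustrate the kind of shader-based feathering I have in mind (a rough sketch, not something I have working: it assumes an extra per-vertex attribute carrying a barycentric coordinate, and a full Gaussian blur would instead render the triangles to a texture and blur that in a second pass):
precision mediump float;
varying vec4 vColor;
varying vec3 vBary; // (1,0,0), (0,1,0), (0,0,1) at the three corners
void main() {
    // distance to the nearest triangle edge, in barycentric terms
    float edge = min(min(vBary.x, vBary.y), vBary.z);
    // fade alpha over the outer band of the triangle for a soft edge
    gl_FragColor = vec4(vColor.rgb, vColor.a * smoothstep(0.0, 0.25, edge));
}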
Dear Friend: I am new at WebGL. I have managed to draw and rotate a triangle, but there are two problems:
1. The triangle changes size and shape while rotating.
2. I don't know how to rotate the triangle around one of its corners.
Following is the code. I have written it in a linear fashion, with a utility class (GlUtil) that wraps up the boring tasks.
I am using a function called perc2glCoord that lets me enter percentages and converts them into GL coordinates.
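(perc2glCoord simply maps the 0-100 percentage range onto the -1 to 1 clip-space range; roughly this:)
export default function perc2glCoord(percentage) {
    // 0% -> -1.0, 50% -> 0.0, 100% -> +1.0
    return percentage / 50 - 1;
}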
import GlUtil from "./glUtil/glUtil.js";
import perc2glCoord from "./functions/perc2glCoord.js";
const gl = GlUtil.getGl("bilza");
console.log(gl);
const vertices = [
perc2glCoord (50) ,perc2glCoord (50), 1,0,0,
perc2glCoord (50) ,perc2glCoord (75), 0,1,0,
perc2glCoord (75) ,perc2glCoord (50), 0,0,1,
// perc2glCoord (25) ,perc2glCoord (50), 1,0,0,
// perc2glCoord (75) ,perc2glCoord (50), 0,1,0,
// perc2glCoord (50) ,perc2glCoord (75), 0,0,1,
];
const vertexShaderSrc =
`
attribute highp vec2 a_pos;
attribute highp vec3 a_clr;
uniform float translateX;
uniform float translateY;
uniform float angle;
varying highp vec3 vColor;
void main(void) {
gl_Position = vec4(
translateX + (a_pos.x * cos(angle) - a_pos.y * sin(angle)),
translateY + (a_pos.x * sin(angle) + a_pos.y * cos(angle)),
1.0,
1.0 );
vColor = a_clr;
}
`;
const fragShaderSrc =
`
varying highp vec3 vColor;
void main(void) {
gl_FragColor = vec4 (vColor , 1.0);
}
`;
const vertexShader = GlUtil.createShader(gl,vertexShaderSrc,gl.VERTEX_SHADER);
const fragmentShader = GlUtil.createShader(gl,fragShaderSrc,gl.FRAGMENT_SHADER);
const programe = GlUtil.getProgram(gl,vertexShader,fragmentShader);
const VOB = GlUtil.getBuffer(gl);
GlUtil.bindBuffer(gl,VOB,vertices);
GlUtil.linkNuseProgram(gl,programe);
let angleValue = 0;
function draw(){
GlUtil.setAttribute(gl,"a_pos",programe, 2 ,4*5,0);
GlUtil.setAttribute(gl,"a_clr",programe, 3 , 4*5,2 * 4);
const translateXLoc = gl.getUniformLocation(programe, "translateX");
gl.uniform1f(translateXLoc,0.0);
const translateYLoc = gl.getUniformLocation(programe, "translateY");
gl.uniform1f(translateYLoc,0.0);
const angleLoc = gl.getUniformLocation(programe, "angle");
const rands = Math.PI * angleValue /180;
gl.uniform1f(angleLoc,rands);
angleValue+= 0.1;
/////////////////////---draw-----------------
GlUtil.clear(gl,0.1,0.1,0.2);
gl.drawArrays(gl.TRIANGLES , 0, 3);
requestAnimationFrame(draw);
}
draw();
---
Here is the GlUtil helper object
export default class GlUtil {
static getGl(canvasId :string ="bilza"):WebGLRenderingContext{
const canvas = document.getElementById(canvasId) as HTMLCanvasElement;
if (canvas == null){
throw new Error("canvas not found");
}
const gl = canvas.getContext("webgl");
if (gl == null) {
throw new Error("Unable to initialize WebGL. Your browser or machine may not support it.");
}
//---Got gl
return gl;
}
static getProgram(gl :WebGLRenderingContext,vshader:WebGLShader, fshader :WebGLShader) :WebGLProgram {
const pgm = gl.createProgram();
if (pgm == null){throw new Error("failed to create program");}
//-----------
gl.attachShader(pgm, vshader);
gl.attachShader(pgm, fshader);
//-------------
// pgm.vertexPosAttrib = gl.getAttribLocation( pgm , 'pos');
// this.gl.useProgram(this.program);
return pgm;
}
static getBuffer(gl :WebGLRenderingContext):WebGLBuffer{
let b = gl.createBuffer();
if (b == null){throw("failed to create buffer");}
return b;
}
static createShader(gl :WebGLRenderingContext, shaderSource :string, shaderType:number):WebGLShader {
var shader = gl.createShader(shaderType);
if (shader == null){
throw new Error("shaders could not be created");
}
gl.shaderSource(shader, shaderSource);
gl.compileShader(shader);
let compiled = gl.getShaderParameter(shader, gl.COMPILE_STATUS);
if (!compiled) {
// There are errors, so display them
var errors = gl.getShaderInfoLog(shader);
console.log('Failed to compile with these errors:' + "type:" + shaderType, errors );
}
return shader;
}
static bindBuffer(gl :WebGLRenderingContext,buff :WebGLBuffer,buffData :number[]){
gl.bindBuffer(gl.ARRAY_BUFFER, buff);
gl.bufferData( gl.ARRAY_BUFFER, new Float32Array(buffData),
gl.STATIC_DRAW);
}
static linkNuseProgram(gl :WebGLRenderingContext,prgrm :WebGLProgram){
gl.linkProgram(prgrm);
gl.useProgram(prgrm);
}
static clear(gl :WebGLRenderingContext,r:number=0,g:number=0,b:number=0,a:number=1){
gl.clearColor(r,g,b,a);
gl.clear(gl.COLOR_BUFFER_BIT);
}
////////////////////////////////////////////////////
static setAttribute(gl :WebGLRenderingContext,nameStr :string,programe :WebGLProgram,numberOfComps :number,stride:number, offset :number=0){
const vertexPosAttrib = gl.getAttribLocation( programe, `${nameStr}`);
gl.enableVertexAttribArray( vertexPosAttrib);
gl.vertexAttribPointer(
vertexPosAttrib, //index
numberOfComps, //number of components =2 x and y
gl.FLOAT, //data type
false, //normalized
stride , //stride - the comple vertex row bytes
offset //offset = 0
);
}
///////////////////////////////////////////////
}
And finally, here is the picture of the triangle.
I have tried all the examples and help that I could find on the internet, including re-learning the math.
Since GL's drawing area is stretched to fit the viewport, you need to take the aspect ratio of the view into account in the gl_Position calculation.
For example, add an aspectRatio uniform to the vertex shader,
uniform float aspectRatio;
:
gl_Position = vec4(
(translateX + (a_pos.x * cos(angle) - a_pos.y * sin(angle))) / aspectRatio,
translateY + (a_pos.x * sin(angle) + a_pos.y * cos(angle)),
1.0,
1.0 );
and pass the viewport aspect ratio as follows.
const viewport = gl.getParameter(gl.VIEWPORT); // [x, y, width, height]
const aspectRatioLoc = gl.getUniformLocation(programe, "aspectRatio");
gl.uniform1f(aspectRatioLoc, viewport[2] / viewport[3]);
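For the second part of the question (rotating around one of the corners), the usual trick is to translate that corner to the origin, rotate, then translate back. A minimal sketch, adding a hypothetical pivot uniform to the same vertex shader:
uniform vec2 pivot; // the corner to rotate around, in clip space
void main(void) {
    vec2 p = a_pos - pivot; // move the pivot to the origin
    p = vec2(p.x * cos(angle) - p.y * sin(angle),
             p.x * sin(angle) + p.y * cos(angle)); // rotate
    p += pivot; // move back
    gl_Position = vec4(
        (translateX + p.x) / aspectRatio,
        translateY + p.y,
        1.0, 1.0);
    vColor = a_clr;
}
Then set it from JavaScript to the corner you want to stay fixed, e.g. the first vertex:
const pivotLoc = gl.getUniformLocation(programe, "pivot");
gl.uniform2f(pivotLoc, perc2glCoord(50), perc2glCoord(50));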
While exploring JavaScript, I encountered a question that is quite baffling. The preface: I convert images of different MIME types (mostly PNGs/JPGs) into bitmaps with the ImageBitmap interface, then transfer them to a worker to convert them into blobs on a separate thread (to do so, I first draw them into an offscreen canvas context) and then save them into IndexedDB, while the main thread continues to load new images. Along the way, to broaden my horizons, I decided to use a webgl2 rendering context in the canvas, since GL is something I had never touched.
To apply the bitmap to the canvas I use the texImage2D function, which I don't seem to fully understand. There I can specify the format of the data in memory being presented to GLSL (it should be RGB, right? since the bitmap was created with no alpha premultiplying), the internal format, and the type. Since the valid format/internal format/type combinations are specified by the spec, I tried out several of them, looking for the best one (quality- and file-size-wise) for my purposes. Since the images being converted to bitmaps are mostly black and white, I thought luminance was what I needed. But first I used the standard RGB format:
gl.texImage2D(
gl.TEXTURE_2D, 0, gl.RGB, bitmap.width, bitmap.height, 0, gl.RGB, gl.UNSIGNED_BYTE, bitmap
);
Then I used RGB565 with the UNSIGNED_SHORT_5_6_5 data type and didn't see any quality loss, while the blob size decreased by ~30% from RGB. As I understand it, it decreased because RGB565 is one unsigned short (2 bytes) per pixel, right? Then I used UNSIGNED_SHORT_5_5_5_1 RGBA, and the blob file size decreased by ~43% compared to standard RGB. Even smaller than RGB565! But gradients on the images became wonky, so no 5551 RGBA for me. The big difference in size between 5551 RGBA and RGB565 is something I don't understand. And what is more confusing: when using luminance with a spec-compliant type/format/internal format combination, the decrease from standard RGB is only ~5%. Why did RGB565 decrease the size by a whopping ~30% while luminance managed a mere ~5%?
For all of this I used the same floating-point sampler in the fragment shader:
#version 300 es
precision mediump float;
precision mediump sampler2D;
uniform sampler2D sampler;
uniform vec2 dimensions;
out vec4 color;
void main(){
color = texture(sampler, vec2(gl_FragCoord.x/dimensions.x, 1.0 - (gl_FragCoord.y/dimensions.y)));
}
Also the same pixelStorei and texParameteri:
gl.pixelStorei(gl.UNPACK_FLIP_Y_WEBGL, false);
gl.pixelStorei(gl.UNPACK_PREMULTIPLY_ALPHA_WEBGL, false);
gl.pixelStorei(gl.UNPACK_COLORSPACE_CONVERSION_WEBGL, gl.NONE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
As the snippet below shows, luminance doesn't change the blob's file size if the image is black and white, while if it is colored the decrease is apparent, though still smaller than with RGBA4. Quite counterintuitive, considering RGBA4 has 2 bytes per pixel while LUMINANCE has 1.
(async() => {
function createImage(src) {
return new Promise((rs, rj) => {
var img = new Image();
img.crossOrigin = 'anonymous';
img.src = src;
img.onload = () => rs(img);
img.onerror = e => rj(e);
});
};
var jpeg = await createImage('https://upload.wikimedia.org/wikipedia/commons/a/aa/5inchHowitzerFiringGallipoli1915.jpeg');
var png = await createImage('https://upload.wikimedia.org/wikipedia/commons/2/2c/6.d%C3%ADl_html_m2fdede78.png');
var jpgClr = await createImage('https://upload.wikimedia.org/wikipedia/commons/thumb/e/ed/%22Good_bye%2C_sweetheart%22%2C_tobacco_label%2C_ca._1865.jpg/117px-%22Good_bye%2C_sweetheart%22%2C_tobacco_label%2C_ca._1865.jpg');
var format = {
standard: {
internalFormat: 'RGB8',
format: 'RGB',
type: 'UNSIGNED_BYTE',
},
rgb565: {
internalFormat: 'RGB565',
format: 'RGB',
type: 'UNSIGNED_SHORT_5_6_5',
},
rgb9e5: {
internalFormat: 'RGB9_E5',
format: 'RGB',
type: 'FLOAT',
},
srgb: {
internalFormat: 'SRGB8',
format: 'RGB',
type: 'UNSIGNED_BYTE',
},
rgba32f: {
internalFormat: 'RGB32F',
format: 'RGB',
type: 'FLOAT',
},
rgba4: {
internalFormat: 'RGBA4',
format: 'RGBA',
type: 'UNSIGNED_SHORT_4_4_4_4',
},
rgb5a1: {
internalFormat: 'RGB5_A1',
format: 'RGBA',
type: 'UNSIGNED_SHORT_5_5_5_1',
},
luma: {
internalFormat: 'LUMINANCE',
format: 'LUMINANCE',
type: 'UNSIGNED_BYTE',
},
};
function compareFormatSize(image) {
return new Promise((r, _) => {
createImageBitmap(image, {
premultiplyAlpha: 'none',
colorSpaceConversion: 'none',
}).then(async bitmap => {
var text = String(image.src.match(/(?<=\.)\w{3,4}$/)).toUpperCase();
console.log(`${text === 'JPG' ? 'Colored jpg' : text}:`);
for (let val of Object.values(format)) {
await logBlobSize(bitmap, val);
if(val.format === 'LUMINANCE') r();
}
}).catch(console.warn);
});
};
compareFormatSize(jpeg).then(_ => compareFormatSize(png)).then(_ => compareFormatSize(jpgClr));
function logBlobSize(bitmap, { internalFormat, format, type }) {
return new Promise(r => {
drawCanvas(bitmap, internalFormat, format, type).convertToBlob({
type: `image/webp`
}).then(blob => { console.log(`Blob from ${internalFormat} is ${blob.size}b`); r(); });
})
}
function drawCanvas(bitmap, internalFormat, format, type) {
const gl = (new OffscreenCanvas(bitmap.width, bitmap.height)).getContext("webgl2", {
antialias: false,
alpha: false,
depth: false,
});
function createShader(gl, type, glsl) {
const shader = gl.createShader(type);
gl.shaderSource(shader, glsl)
gl.compileShader(shader);
if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) {
console.error(gl.getShaderInfoLog(shader));
gl.deleteShader(shader);
return;
}
return shader;
}
const vs = createShader(
gl,
gl.VERTEX_SHADER,
`#version 300 es
#define POSITION_LOCATION 0
layout(location = POSITION_LOCATION) in vec2 position;
void main()
{
gl_Position = vec4(position, 0.0, 1.0);
}`,
);
const fs = createShader(
gl,
gl.FRAGMENT_SHADER,
`#version 300 es
precision mediump float;
precision mediump sampler2D;
uniform sampler2D sampler;
uniform vec2 dimensions;
out vec4 color;
void main()
{
color = texture(sampler, vec2(gl_FragCoord.x/dimensions.x, 1.0 - (gl_FragCoord.y/dimensions.y)));
}`,
);
const program = gl.createProgram();
gl.attachShader(program, vs);
gl.attachShader(program, fs);
gl.linkProgram(program);
const sampler = gl.getUniformLocation(program, 'sampler');
const dimensions = gl.getUniformLocation(program, 'dimensions');
const position = 0; // GLSL location
const vao = gl.createVertexArray();
gl.bindVertexArray(vao);
gl.enableVertexAttribArray(position);
const vxBuffer = gl.createBuffer();
const vertices = new Float32Array([
-1.0,-1.0,
1.0,-1.0,
-1.0, 1.0,
1.0, 1.0,
]);
gl.bindBuffer(gl.ARRAY_BUFFER, vxBuffer);
gl.vertexAttribPointer(position, 2, gl.FLOAT, false, 0, 0);
gl.bufferData(gl.ARRAY_BUFFER, vertices, gl.STATIC_DRAW);
const texture = gl.createTexture();
gl.activeTexture(gl.TEXTURE0);
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.pixelStorei(gl.UNPACK_FLIP_Y_WEBGL, false);
gl.pixelStorei(gl.UNPACK_PREMULTIPLY_ALPHA_WEBGL, false);
gl.pixelStorei(gl.UNPACK_COLORSPACE_CONVERSION_WEBGL, gl.NONE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texImage2D(
gl.TEXTURE_2D,
0,
gl[internalFormat],
bitmap.width,
bitmap.height,
0,
gl[format],
gl[type],
bitmap
);
gl.useProgram(program);
gl.uniform1i(sampler, 0);
gl.uniform2f(dimensions, gl.canvas.width, gl.canvas.height);
gl.drawArrays(gl.TRIANGLE_STRIP, 0, 4);
gl.deleteTexture(texture);
gl.deleteVertexArray(vao);
gl.deleteBuffer(vxBuffer);
gl.deleteProgram(program);
return gl.canvas;
}
})()
Thanks in advance!
The canvas is always 8-bit RGBA (32-bit color). There is talk of adding options for a deeper-depth canvas to support HD color displays, but that hasn't shipped.
So calling canvas.convertToBlob is always going to give you a 32-bit RGBA PNG (or JPEG). Creating a LUMINANCE texture will give you a black and white texture, but it gets drawn into a 32-bit RGBA canvas. There is no option to get a 1-channel PNG.
As for RGB565, RGBA5551, etc., those formats may or may not be supported directly by the hardware; the spec allows the driver to choose a higher-resolution format, and I'm guessing most desktops expand the data to RGBA8 when you upload it, so it won't save any memory.
On the other hand, when uploading as RGB565 or RGBA5551, the WebGL spec requires that a passed image is first converted to that format, so the browser effectively quantizes your image down to those color depths, which means you're losing colors (for example, an 8-bit red value of 201 becomes 201 >> 3 = 25 in 5 bits, which expands back to 25 * 255 / 31 ≈ 206). You then draw the quantized image back to the canvas and save it, so of course it's likely to compress better, since there are more runs of similar colors.
From the WebGL spec for the version of texImage2D that takes an ImageBitmap
The source image data is conceptually first converted to the data type and format specified by the format and type arguments, and then transferred to the WebGL implementation. Format conversion is performed according to the following table. If a packed pixel format is specified which would imply loss of bits of precision from the image data, this loss of precision must occur.
Let's try it without WebGL
(async() => {
function createImage(src) {
return new Promise((rs, rj) => {
const img = new Image();
img.crossOrigin = 'anonymous';
img.src = src;
img.onload = () => rs(img);
img.onerror = rj;
});
};
const jpeg = await createImage('https://upload.wikimedia.org/wikipedia/commons/a/aa/5inchHowitzerFiringGallipoli1915.jpeg');
const png = await createImage('https://upload.wikimedia.org/wikipedia/commons/2/2c/6.d%C3%ADl_html_m2fdede78.png');
const jpgClr = await createImage('https://upload.wikimedia.org/wikipedia/commons/thumb/e/ed/%22Good_bye%2C_sweetheart%22%2C_tobacco_label%2C_ca._1865.jpg/117px-%22Good_bye%2C_sweetheart%22%2C_tobacco_label%2C_ca._1865.jpg');
const format = {
standard: {
internalFormat: 'RGB8',
format: 'RGB',
type: 'UNSIGNED_BYTE',
fn: p => [p[0], p[1], p[2], 255],
},
rgb565: {
internalFormat: 'RGB565',
format: 'RGB',
type: 'UNSIGNED_SHORT_5_6_5',
fn: p => [
(p[0] >> 3) * 255 / 31,
(p[1] >> 2) * 255 / 63,
(p[2] >> 3) * 255 / 31,
255,
],
},
rgba4: {
internalFormat: 'RGBA4',
format: 'RGBA',
type: 'UNSIGNED_SHORT_4_4_4_4',
fn: p => [
(p[0] >> 4) * 255 / 15,
(p[1] >> 4) * 255 / 15,
(p[2] >> 4) * 255 / 15,
(p[3] >> 4) * 255 / 15,
],
},
rgb5a1: {
internalFormat: 'RGB5_A1',
format: 'RGBA',
type: 'UNSIGNED_SHORT_5_5_5_1',
fn: p => [
(p[0] >> 3) * 255 / 31,
(p[1] >> 3) * 255 / 31,
(p[2] >> 3) * 255 / 31,
(p[3] >> 7) * 255 / 1,
],
},
luma: {
internalFormat: 'LUMINANCE',
format: 'LUMINANCE',
type: 'UNSIGNED_BYTE',
fn: p => [p[0], p[0], p[0], 255],
},
};
async function compareFormatSize(image) {
const bitmap = await createImageBitmap(image, {
premultiplyAlpha: 'none',
colorSpaceConversion: 'none',
});
const text = String(image.src.match(/(?<=\.)\w{3,4}$/)).toUpperCase();
log(`${text === 'JPG' ? 'Colored jpg' : text}:`);
for (const val of Object.values(format)) {
await logBlobSize(bitmap, val);
}
};
await compareFormatSize(jpeg);
await compareFormatSize(png);
await compareFormatSize(jpgClr);
async function logBlobSize(bitmap, {
internalFormat,
format,
type,
fn,
}) {
const canvas = drawCanvas(bitmap, internalFormat, format, type);
const blob = await canvas.convertToBlob({
type: `image/webp`
});
const canvas2 = drawFn(bitmap, fn);
const blob2 = await canvas2.convertToBlob({
type: `image/webp`
});
log(`Blob from ${internalFormat} is ${blob.size}b(webgl) vs ${blob2.size}b(code)`);
if (false) {
const img = new Image();
img.src = URL.createObjectURL(blob);
document.body.appendChild(img);
const img2 = new Image();
img2.src = URL.createObjectURL(blob2);
document.body.appendChild(img2);
}
}
function drawFn(bitmap, fn) {
const ctx = (new OffscreenCanvas(bitmap.width, bitmap.height)).getContext("2d");
ctx.drawImage(bitmap, 0, 0);
const imageData = ctx.getImageData(0, 0, bitmap.width, bitmap.height);
const pixels = imageData.data;
for (let i = 0; i < pixels.length; i += 4) {
const n = fn(pixels.subarray(i, i + 4));
pixels.set(n, i);
}
ctx.putImageData(imageData, 0, 0);
return ctx.canvas;
}
function drawCanvas(bitmap, internalFormat, format, type) {
const gl = (new OffscreenCanvas(bitmap.width, bitmap.height)).getContext("webgl2", {
antialias: false,
alpha: false,
depth: false,
});
function createShader(gl, type, glsl) {
const shader = gl.createShader(type);
gl.shaderSource(shader, glsl)
gl.compileShader(shader);
if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) {
console.error(gl.getShaderInfoLog(shader));
gl.deleteShader(shader);
return;
}
return shader;
}
const vs = createShader(
gl,
gl.VERTEX_SHADER,
`#version 300 es
#define POSITION_LOCATION 0
layout(location = POSITION_LOCATION) in vec2 position;
void main()
{
gl_Position = vec4(position, 0.0, 1.0);
}`,
);
const fs = createShader(
gl,
gl.FRAGMENT_SHADER,
`#version 300 es
precision mediump float;
precision mediump sampler2D;
uniform sampler2D sampler;
uniform vec2 dimensions;
out vec4 color;
void main()
{
color = texture(sampler, vec2(gl_FragCoord.x/dimensions.x, 1.0 - (gl_FragCoord.y/dimensions.y)));
}`,
);
const program = gl.createProgram();
gl.attachShader(program, vs);
gl.attachShader(program, fs);
gl.linkProgram(program);
const sampler = gl.getUniformLocation(program, 'sampler');
const dimensions = gl.getUniformLocation(program, 'dimensions');
const position = 0; // GLSL location
const vao = gl.createVertexArray();
gl.bindVertexArray(vao);
gl.enableVertexAttribArray(position);
const vxBuffer = gl.createBuffer();
const vertices = new Float32Array([
-1.0,-1.0,
1.0,-1.0,
-1.0, 1.0,
1.0, 1.0,
]);
gl.bindBuffer(gl.ARRAY_BUFFER, vxBuffer);
gl.vertexAttribPointer(position, 2, gl.FLOAT, false, 0, 0);
gl.bufferData(gl.ARRAY_BUFFER, vertices, gl.STATIC_DRAW);
const texture = gl.createTexture();
gl.activeTexture(gl.TEXTURE0);
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.pixelStorei(gl.UNPACK_FLIP_Y_WEBGL, false);
gl.pixelStorei(gl.UNPACK_PREMULTIPLY_ALPHA_WEBGL, false);
gl.pixelStorei(gl.UNPACK_COLORSPACE_CONVERSION_WEBGL, gl.NONE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texImage2D(
gl.TEXTURE_2D,
0,
gl[internalFormat],
bitmap.width,
bitmap.height,
0,
gl[format],
gl[type],
bitmap
);
gl.useProgram(program);
gl.uniform1i(sampler, 0);
gl.uniform2f(dimensions, gl.canvas.width, gl.canvas.height);
gl.drawArrays(gl.TRIANGLE_STRIP, 0, 4);
gl.deleteTexture(texture);
gl.deleteVertexArray(vao);
gl.deleteBuffer(vxBuffer);
gl.deleteProgram(program);
return gl.canvas;
}
})()
function log(...args) {
const elem = document.createElement('pre');
elem.textContent = [...args].join(' ');
document.body.appendChild(elem);
}
pre { margin: 0; }
Why does setting the format in gl.texImage2D to gl.LUMINANCE instead of gl.RGB make the blob created from the canvas only ~5% smaller in file size?
I'm not seeing these results. In your example the black and white images stay the same size with RGB vs. LUMINANCE. The color image becomes half the size. But of course it depends on the compression algorithm whether a black and white 32-bit image compresses smaller than a color 32-bit image, since in all cases the canvas is 32 bits when convertToBlob is called.
I am fairly new to WebGL, and I am working on a 3D game that dynamically generates land around the player, so I am trying to add vertices to draw as the game runs. Things worked fine until I started adding this feature, making hundreds of gl.drawArrays() calls per frame, which made it super laggy. After doing some research I found that a better approach is to build one huge array containing all of the vertices (each shape separated by degenerate triangles) and then make a single gl.drawArrays() call per frame.
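(My understanding of the degenerate-triangle trick, written as a hypothetical helper: repeating the last vertex of one strip and the first vertex of the next creates zero-area triangles that rasterize to nothing, so the strips can be concatenated into one draw call.)
function joinStrips(stripA, stripB, compsPerVertex) {
    // duplicate the seam vertices to form the degenerate triangles
    const lastA = stripA.slice(stripA.length - compsPerVertex);
    const firstB = stripB.slice(0, compsPerVertex);
    return stripA.concat(lastA, firstB, stripB);
}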
Here is the part of my code that runs on load:
function loadGraphics() {
// ground
// buffers
quadVertexPositionBuffer = gl.createBuffer();
vertices = [];
verticesItemCount = 0;
quadVertexColorBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, quadVertexColorBuffer);
var colors = [
0.0, 0.4, 0.0, 1.0,
0.0, 0.4, 0.0, 1.0,
0.0, 0.4, 0.0, 1.0,
0.0, 0.4, 0.0, 1.0,
];
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(colors), gl.DYNAMIC_DRAW);
quadVertexColorBuffer.itemSize = 4;
quadVertexColorBuffer.numItems = 4;
}
Here is the part that runs per frame:
function drawGraphics() {
// draw code for graphics
gl.viewport(0, 0, gl.viewportWidth, gl.viewportHeight);
gl.clearColor(0.35, 0.4, 1.0, 1.0 );
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
mat4.perspective(45, gl.viewportWidth / gl.viewportHeight, 0.1, 100.0, pMatrix);
mat4.identity(mvMatrix);
// perspective
var cameraX = camera.x, cameraY = camera.y, cameraZ = camera.z;
mat4.rotate(mvMatrix, rotMatrix[1], [1, 0, 0]);
mat4.rotate(mvMatrix, rotMatrix[0], [0, 1, 0]);
mat4.translate(mvMatrix, [-cameraX/33, -cameraY/33, -cameraZ/33]);
debug.add("{camera} x:"+camera.x+",y:"+camera.y+",z:"+camera.z+";");
debug.add("\n{mouse delta} x:"+mouse.x-mouse.prevX+",y:"+mouse.y-mouse.prevY+";");
debug.add("\n{rm}[0]:"+rotMatrix[0]+",[1]:"+rotMatrix[1]);
// ground
gl.bindBuffer(gl.ARRAY_BUFFER, quadVertexColorBuffer);
gl.vertexAttribPointer(shaderProgram.vertexColorAttribute, quadVertexColorBuffer.itemSize, gl.FLOAT, false, 0, 0);
// land plots
vertices = [];
verticesItemCount = 0;
for (var i = 0; i < landPlots.length; i++) {
var oX = landPlots[i].x*3;
var oZ = landPlots[i].z*3;
var plotVertices = [
-1.5+oX, 0.0, 1.5+oZ, 1.0,
1.5+oX, 0.0, 1.5+oZ, 1.0,
-1.5+oX, 0.0, -1.5+oZ, 1.0,
1.5+oX, 0.0, -1.5+oZ, 1.0
];
pushDrawArray(plotVertices, 4);
for(var j = 1; j <= 2; j++) {
debug.add(" " + renderLandPlotIntersection(landPlots[i], j));
}
}
gl.bindBuffer(gl.ARRAY_BUFFER, quadVertexPositionBuffer);
gl.vertexAttribPointer(shaderProgram.vertexPositionAttribute, 4, gl.FLOAT, false, 0, 0);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(vertices), gl.DYNAMIC_DRAW);
gl.drawArrays(gl.TRIANGLE_STRIP, 0, verticesItemCount);
}
function renderLandPlotIntersection(landPlot, side) {
var x = landPlot.x;
var z = landPlot.z;
var lvl = landPlot.level;
var olvl = null;
var plot;
switch (side) {
case 0: plot =getLandPlot(x-1, z ); if (plot !== null) olvl = plot.level/66; else return 0; break;
case 1: plot =getLandPlot(x, z+1); if (plot !== null) olvl = plot.level/66; else return 0; break;
case 2: plot =getLandPlot(x+1, z ); if (plot !== null) olvl = plot.level/66; else return 0; break;
case 3: plot =getLandPlot(x, z-1); if (plot !== null) olvl = plot.level/66; else return 0; break;
default: throw "Land plot intersection drawing: side out of range."; return -1;
}
var intersectionVertices = [
x*3, lvl, z*3,
x*3, lvl, z*3,
x*3, olvl, z*3,
x*3, olvl, z*3
];
pushDrawArray(intersectionVertices, 4);
return +1;
}
function pushDrawArray(array, itemCount) {
if (vertices.length > 0) {
// degenerate
vertices.push(vertices[vertices.length-3]);
vertices.push(vertices[vertices.length-2]);
vertices.push(vertices[vertices.length-1]);
vertices.push(array[0]);
vertices.push(array[1]);
vertices.push(array[2]);
verticesItemCount += 2 ;
}
gl.bufferSubData(gl.ARRAY_BUFFER, verticesItemCount*4, array);
verticesItemCount += itemCount;
}
I started using DYNAMIC_DRAW, though I don't really know how to use it. verticesItemCount tracks how many vertices are in the vertices array.
gl.drawArrays() returns this error:
[.Offscreen-For-WebGL-060B7ED8]GL ERROR :GL_INVALID_OPERATION : glDrawArrays: attempt to access out of range vertices in attribute 1 localhost/:1 WebGL: too many errors, no more errors will be reported to the console for this context.
How can I fix this code to not cause an error?
I figured out what I did: I made the vertex position array longer than my vertex color array, so the draw call was trying to access color data out of bounds. The fix is to keep the vertex color array the same length (one color per vertex) as the vertex position array.
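A minimal sketch of the fix, assuming one RGBA color per vertex (the helper name is mine):
// push position and color together so both attribute buffers
// always describe the same number of vertices
function pushVertex(positions, colors, x, y, z, w, rgba) {
    positions.push(x, y, z, w);
    colors.push(rgba[0], rgba[1], rgba[2], rgba[3]);
}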
I need to implement what amounts to a "Google Maps" style zoom effect in WebGL. Specifically I have a simple 2-dimensional scene that is always perpendicular to the camera. When a user clicks on the scene, the camera should zoom to a location that is over the click, but closer to the 2-dimensional scene.
For example see this jsfiddle, that implements the scene but no zooming:
http://jsfiddle.net/JqBs8/4/
If you have a WebGL enabled browser, you should see a triangle and a square (2-dimensional) rendered at -7 on the Z axis. I have put in a placeholder handleMouseUp() event handler that logs any click events, but I'm a little lost as to how to translate the coordinates given by the click event into a new location for the camera (or I guess equivalently a new view matrix).
(The jsfiddle is based on tutorial 1 from learningwebgl.com and uses the glMatrix http://code.google.com/p/glmatrix/ library for matrix operations. Keep in mind that this is WebGL, which is similar to OpenGL ES, and has no access to glu* functions.)
I've written something in this jsfiddle that should help you.
http://jsfiddle.net/hedzys6r/
(or https://codepen.io/brainjam/pen/gBZyGm)
Just click on the WebGL window to zoom in to where the mouse is pointing.
The basic idea is that a point in the WebGL window is obtained by projecting it from 3-space using the projection matrix pMatrix and the view matrix (the view matrix depends on where the camera is and the direction in which it is looking). The composition of these matrices is named pvMatrix in the code.
If you want the opposite transform from the window back to three space, you have to take a clip space coordinate (x,y,z) and 'unproject' it back into 3D using the inverse of pvMatrix. In clip space, coordinates are in the range [-1,1], and the z coordinate is depth.
In the OpenGL world, these transforms are implemented in gluProject() and gluUnProject() (which you can Google for more information and source code).
In the jsfiddle example, we calculate the (x,y) coordinates in clip space, and then unproject (x,y,z) for two different values of z. From that we get two points in 3D space that map onto (x,y), and we can infer a direction vector. We can then move the camera in that direction to get a zoom effect.
In the code, the camera position is at the negation of the eye vector.
This example shows you how to move the camera in the direction that you are clicking. If you want to actually move towards specific objects in the scene, you have to implement something like object selection, which is a different kettle of fish. The example I've given is unaware of the objects in the scene.
This is really part of brainjam's answer, but just in case that jsfiddle were to go away, I wanted to make sure the code was archived. Here is the primary bit:
function handleMouseUp(event) {
var world1 = [0,0,0,0] ;
var world2 = [0,0,0,0] ;
var dir = [0,0,0] ;
var w = event.srcElement.clientWidth ;
var h = event.srcElement.clientHeight ;
// calculate x,y clip space coordinates
var x = (event.offsetX-w/2)/(w/2) ;
var y = -(event.offsetY-h/2)/(h/2) ;
mat4.inverse(pvMatrix, pvMatrixInverse) ;
// convert clip space coordinates into world space
mat4.multiplyVec4(pvMatrixInverse, [x,y,-1,1], world1) ;
vec3.scale(world1,1/world1[3]) ;
mat4.multiplyVec4(pvMatrixInverse, [x,y,0,1], world2) ;
vec3.scale(world2,1/world2[3]) ;
// calculate world space view vector
vec3.subtract(world2,world1,dir) ;
vec3.normalize(dir) ;
vec3.scale(dir,0.3) ;
// move eye in direction of world space view vector
vec3.subtract(eye,dir) ;
drawScene();
console.log(event)
}
And the entirety of the JS...
var gl;
function initGL(canvas) {
try {
gl = canvas.getContext("experimental-webgl");
gl.viewportWidth = canvas.width;
gl.viewportHeight = canvas.height;
} catch (e) {
}
if (!gl) {
alert("Could not initialise WebGL, sorry :-(");
}
}
function getShader(gl, id) {
var shaderScript = document.getElementById(id);
if (!shaderScript) {
return null;
}
var str = "";
var k = shaderScript.firstChild;
while (k) {
if (k.nodeType == 3) {
str += k.textContent;
}
k = k.nextSibling;
}
var shader;
if (shaderScript.type == "x-shader/x-fragment") {
shader = gl.createShader(gl.FRAGMENT_SHADER);
} else if (shaderScript.type == "x-shader/x-vertex") {
shader = gl.createShader(gl.VERTEX_SHADER);
} else {
return null;
}
gl.shaderSource(shader, str);
gl.compileShader(shader);
if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) {
alert(gl.getShaderInfoLog(shader));
return null;
}
return shader;
}
var shaderProgram;
function initShaders() {
var fragmentShader = getShader(gl, "shader-fs");
var vertexShader = getShader(gl, "shader-vs");
shaderProgram = gl.createProgram();
gl.attachShader(shaderProgram, vertexShader);
gl.attachShader(shaderProgram, fragmentShader);
gl.linkProgram(shaderProgram);
if (!gl.getProgramParameter(shaderProgram, gl.LINK_STATUS)) {
alert("Could not initialise shaders");
}
gl.useProgram(shaderProgram);
shaderProgram.vertexPositionAttribute = gl.getAttribLocation(shaderProgram, "aVertexPosition");
gl.enableVertexAttribArray(shaderProgram.vertexPositionAttribute);
shaderProgram.pMatrixUniform = gl.getUniformLocation(shaderProgram, "uPMatrix");
shaderProgram.mvMatrixUniform = gl.getUniformLocation(shaderProgram, "uMVMatrix");
}
var mvMatrix = mat4.create();
var pMatrix = mat4.create();
function setMatrixUniforms() {
gl.uniformMatrix4fv(shaderProgram.pMatrixUniform, false, pMatrix);
gl.uniformMatrix4fv(shaderProgram.mvMatrixUniform, false, mvMatrix);
}
var triangleVertexPositionBuffer;
var squareVertexPositionBuffer;
function initBuffers() {
triangleVertexPositionBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, triangleVertexPositionBuffer);
var vertices = [
0.0, 1.0, 0.0,
-1.0, -1.0, 0.0,
1.0, -1.0, 0.0
];
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(vertices), gl.STATIC_DRAW);
triangleVertexPositionBuffer.itemSize = 3;
triangleVertexPositionBuffer.numItems = 3;
squareVertexPositionBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, squareVertexPositionBuffer);
vertices = [
1.0, 1.0, 0.0,
-1.0, 1.0, 0.0,
1.0, -1.0, 0.0,
-1.0, -1.0, 0.0
];
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(vertices), gl.STATIC_DRAW);
squareVertexPositionBuffer.itemSize = 3;
squareVertexPositionBuffer.numItems = 4;
}
var eye = vec3.create([0,0,0]) ; // negation of actual eye position
var pvMatrix = mat4.create();
var pvMatrixInverse = mat4.create();
function drawScene() {
gl.viewport(0, 0, gl.viewportWidth, gl.viewportHeight);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
mat4.perspective(45, gl.viewportWidth / gl.viewportHeight, 0.1, 100.0, pMatrix);
mat4.identity(mvMatrix);
// calculate the view transform mvMatrix, and the projection-view matrix pvMatrix
mat4.translate(mvMatrix, eye);
mat4.multiply(pMatrix,mvMatrix,pvMatrix) ;
mat4.translate(mvMatrix, [-1.5, 0.0, -7.0]);
gl.bindBuffer(gl.ARRAY_BUFFER, triangleVertexPositionBuffer);
gl.vertexAttribPointer(shaderProgram.vertexPositionAttribute, triangleVertexPositionBuffer.itemSize, gl.FLOAT, false, 0, 0);
setMatrixUniforms();
gl.drawArrays(gl.TRIANGLES, 0, triangleVertexPositionBuffer.numItems);
mat4.translate(mvMatrix, [3.0, 0.0, 0.0]);
gl.bindBuffer(gl.ARRAY_BUFFER, squareVertexPositionBuffer);
gl.vertexAttribPointer(shaderProgram.vertexPositionAttribute, squareVertexPositionBuffer.itemSize, gl.FLOAT, false, 0, 0);
setMatrixUniforms();
gl.drawArrays(gl.TRIANGLE_STRIP, 0, squareVertexPositionBuffer.numItems);
}
function handleMouseUp(event) {
var world1 = [0,0,0,0] ;
var world2 = [0,0,0,0] ;
var dir = [0,0,0] ;
var w = event.srcElement.clientWidth ;
var h = event.srcElement.clientHeight ;
// calculate x,y clip space coordinates
var x = (event.offsetX-w/2)/(w/2) ;
var y = -(event.offsetY-h/2)/(h/2) ;
mat4.inverse(pvMatrix, pvMatrixInverse) ;
// convert clip space coordinates into world space
mat4.multiplyVec4(pvMatrixInverse, [x,y,-1,1], world1) ;
vec3.scale(world1,1/world1[3]) ;
mat4.multiplyVec4(pvMatrixInverse, [x,y,0,1], world2) ;
vec3.scale(world2,1/world2[3]) ;
// calculate world space view vector
vec3.subtract(world2,world1,dir) ;
vec3.normalize(dir) ;
vec3.scale(dir,0.3) ;
// move eye in direction of world space view vector
vec3.subtract(eye,dir) ;
drawScene();
console.log(event)
}
function webGLStart() {
var canvas = document.getElementById("lesson01-canvas");
initGL(canvas);
initShaders();
initBuffers();
gl.clearColor(0.0, 0.0, 0.0, 1.0);
gl.enable(gl.DEPTH_TEST);
canvas.onmouseup = handleMouseUp;
drawScene();
}
webGLStart();