opencv.js issue with findTransformECC criteria - javascript

I'm attempting to use opencv.js to align images to a baseline image. I'm following some basic Python guidance that I've seen work (example: https://alexanderpacha.com/2018/01/29/aligning-images-an-engineers-solution/),
but I'm getting tripped up by an error that I don't quite understand: "opencv.js:30 Uncaught TypeError: Cannot use 'in' operator to search for 'type' in 1e-10". It seems to be caused by the "criteria" variable passed to cv.findTransformECC(); see the code below.
Any guidance as to what I'm doing wrong here?
function Align_img(){
    let image_baseline = cv.imread(imgElement_Baseline);
    let image = cv.imread('imageChangeup');
    let im1_gray = new cv.Mat();
    let im2_gray = new cv.Mat();
    let im2_aligned = new cv.Mat();
    //get size of baseline image
    width1 = image_baseline.cols;
    height1 = image_baseline.rows;
    //resize image to baseline image
    let dim1 = new cv.Size(width1, height1);
    cv.resize(image, image, dim1, cv.INTER_AREA);
    // Convert images to grayscale
    cv.cvtColor(image_baseline, im1_gray, cv.COLOR_BGR2GRAY);
    cv.cvtColor(image, im2_gray, cv.COLOR_BGR2GRAY);
    // Find size of image1
    let dsize = new cv.Size(image_baseline.rows, image_baseline.cols);
    // Define the motion model
    warp_mode = cv.MOTION_HOMOGRAPHY;
    // Define 3x3 matrix and initialize the matrix to identity
    let warp_matrix = cv.Mat.eye(3, 3, cv.CV_8U);
    // Specify the number of iterations.
    number_of_iterations = 5000;
    // Specify the threshold of the increment in the correlation coefficient between two iterations
    termination_eps = 0.0000000001; //1e-10;
    // Define termination criteria
    criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps);
    //Run the ECC algorithm. The results are stored in warp_matrix.
    cv.findTransformECC(im1_gray, im2_gray, warp_matrix, warp_mode, criteria, null, 5);
    // Use warpPerspective for Homography
    cv.warpPerspective(image, im2_aligned, warp_matrix, dsize, cv.INTER_LINEAR + cv.WARP_INVERSE_MAP);
    cv.imshow('imageChangeup', im2_aligned);
    im1_gray.delete();
    im2_gray.delete();
    im2_aligned.delete();
};
UPDATE: Two things. 1. I found an easy fix for the error (in JavaScript a parenthesized, comma-separated list is just the comma operator, so criteria ended up being only 1e-10 rather than a tuple; constructing a cv.TermCriteria object instead fixes it, code below), and 2. it looks like a bug in the findTransformECC opencv.js API is keeping this method from working. Here is the current code.
The API has two optional parameters (inputMask and gaussFiltSize), but if you don't include them you get an error ("function findTransformECC called with 5 arguments, expected 7 args!").
The issue is what to use for inputMask: "null" does not work, there doesn't seem to be support for cv.noArray(), and I can't find a mask that doesn't lead to an 'uncaught exception' error.
I'll update again once I find a workaround. Let me know if anyone sees one.
function Align_img(){
    let image_baseline = cv.imread(imgElement_Baseline);
    let image = cv.imread('imageChangeup');
    let im1_gray = new cv.Mat();
    let im2_gray = new cv.Mat();
    let im2_aligned = new cv.Mat();
    //get size of baseline image
    var width1 = image_baseline.cols;
    var height1 = image_baseline.rows;
    //resize image to baseline image
    let dim1 = new cv.Size(width1, height1);
    cv.resize(image, image, dim1, cv.INTER_AREA);
    // Convert images to grayscale
    cv.cvtColor(image_baseline, im1_gray, cv.COLOR_BGR2GRAY);
    cv.cvtColor(image, im2_gray, cv.COLOR_BGR2GRAY);
    // Find size of image1
    let dsize = new cv.Size(image_baseline.rows, image_baseline.cols);
    // Define the motion model
    const warp_mode = cv.MOTION_HOMOGRAPHY;
    // Define 3x3 matrix and initialize the matrix to identity
    let warp_matrix = cv.Mat.eye(3, 3, cv.CV_8U);
    // Specify the number of iterations.
    const number_of_iterations = 5000;
    // Specify the threshold of the increment in the correlation coefficient between two iterations
    const termination_eps = 0.0000000001; //1e-10;
    // Define termination criteria
    //const criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps);
    let criteria = new cv.TermCriteria(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps);
    //Run the ECC algorithm. The results are stored in warp_matrix.
    //let inputMask = new cv.Mat.ones(im1_gray.size(), cv.CV_8U); //uint8
    cv.findTransformECC(im1_gray, im2_gray, warp_matrix, warp_mode, criteria, null, 5);
    // Use warpPerspective for Homography
    cv.warpPerspective(image, im2_aligned, warp_matrix, dsize, cv.INTER_LINEAR + cv.WARP_INVERSE_MAP);
    getMatStats(im2_aligned, 1); //0 = baseline (srcMat), 1 = image (srcMat_compare)
    cv.imshow('imageChangeup', im2_aligned);
    im1_gray.delete();
    im2_gray.delete();
    im2_aligned.delete();
};
UPDATE 2: I verified the code works fine in Python (code below). The issue at hand now is simply: how do you do this in JavaScript: "inputMask=None"?
Python:
# Read the images to be aligned
im1 = cv2.imread(r"C:\temp\tcoin\69.jpg");
im2 = cv2.imread(r"C:\temp\tcoin\pic96_crop.jpg");
#resize image to compare
width1 = int(im1.shape[1])
height1 = int(im1.shape[0])
dim1 = (width1, height1)
im2 = cv2.resize(im2, dim1, interpolation = cv2.INTER_AREA)
# Convert images to grayscale
im1_gray = cv2.cvtColor(im1,cv2.COLOR_BGR2GRAY)
im2_gray = cv2.cvtColor(im2,cv2.COLOR_BGR2GRAY)
# Find size of image1
sz = im1.shape
# Define the motion model
warp_mode = cv2.MOTION_HOMOGRAPHY
# Define 2x3 or 3x3 matrices and initialize the matrix to identity
warp_matrix = np.eye(3, 3, dtype=np.float32)
# Specify the number of iterations.
number_of_iterations = 5000;
# Specify the threshold of the increment
# in the correlation coefficient between two iterations
termination_eps = 1e-10;
# Define termination criteria
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)
# Run the ECC algorithm. The results are stored in warp_matrix.
(cc, warp_matrix) = cv2.findTransformECC (im1_gray,im2_gray,warp_matrix, warp_mode, criteria, inputMask=None, gaussFiltSize=1)
# Use warpPerspective for Homography
im2_aligned = cv2.warpPerspective (im2, warp_matrix, (sz[1],sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
# Show final results
cv2.imshow("Aligned Image 2", im2_aligned)
cv2.imwrite(r"c:\temp\tcoin\output\pic96_cropB.jpg", im2_aligned)
cv2.waitKey(0)
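Back on the JavaScript side, one avenue I'm still experimenting with (not verified yet) is passing an explicit all-ones mask instead of null, and switching the warp matrix to CV_32F, since findTransformECC expects a floating-point warp matrix. A rough sketch of that attempt, assuming cv.Mat.ones is available in this opencv.js build:

let warp_matrix = cv.Mat.eye(3, 3, cv.CV_32F); // ECC expects a float32 warp matrix
let inputMask = cv.Mat.ones(im1_gray.rows, im1_gray.cols, cv.CV_8U); // all pixels considered valid
try {
    cv.findTransformECC(im1_gray, im2_gray, warp_matrix, warp_mode, criteria, inputMask, 5);
} finally {
    inputMask.delete(); // free the WASM-side memory whether or not the call throws
}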

Related

How to perform math operations with OpenCV Matrix and a point-like value in Javascript?

I want to scale a contour in OpenCV.js. I have a valid contour in the cnt variable of type cv.Mat (I verified it by using drawContours).
I found a function in Python that does everything I need, but I'm having trouble converting it to JavaScript.
Python version:
def scale_contour(cnt, scale):
    M = cv2.moments(cnt)
    cx = int(M['m10']/M['m00'])
    cy = int(M['m01']/M['m00'])
    cnt_norm = cnt - [cx, cy]
    cnt_scaled = cnt_norm * scale
    cnt_scaled = cnt_scaled + [cx, cy]
    cnt_scaled = cnt_scaled.astype(np.int32)
    return cnt_scaled
Here's what I started for Javascript:
function scaleContour(cnt, scale) {
    console.log("cnt", cnt.data32S, cnt.rows, cnt.cols, cnt.type());
    const M = cv.moments(cnt);
    const cx = M['m10'] / M['m00'];
    const cy = M['m01'] / M['m00'];
    const offset = [Math.ceil(cx), Math.ceil(cy)];
    console.log("Offset", offset);
    // cannot use the convenient Python arithmetic here, have to call functions
    // technically a point is 1 row, 2 cols, but the cnt type is 2-channel CV_32SC2 (12),
    // so keep the size 1x1 and leave the second dimension as a channel to stay compatible with the contour format
    const pointMat = cv.matFromArray(1, 1, cnt.type(), offset);
    console.log("pointMat", pointMat.data32S);
    const cntNorm = new cv.Mat(cnt.rows, cnt.cols, cnt.type());
    cv.subtract(cnt, pointMat, cntNorm); // <-- my app crashes here with an exception that is just some random number - OpenCV seems to always do that when I'm doing something wrong or it's out of memory
    console.log("ctnorm", cntNorm.data32S);
Unfortunately, I cannot find a good example of Python-like matrix operations in the official OpenCV.js documentation on basic data structures. It just shows how to create matrices but does not explain how to perform simple math operations with a matrix and a point-like value.
Also, I'm not sure when I need new cv.Mat(cnt.rows, cnt.cols, cnt.type()); and when new cv.Mat() is enough. The documentation has both but does not say what the rule of thumb is for using an empty Mat versus one configured with rows/cols/type.
And the log output for cnt cols and rows is confusing: it prints 75 rows and 1 col, but the data is Int32Array(150). It seems that sometimes the second dimension of values is carried by the type (channels) rather than by cols/rows. That's confusing. How should we know when to use rows=1, cols=2 and when to use rows=1, cols=1 with a 2-channel type?
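For what it's worth, here is how I currently understand the rows/cols/channels relationship (please correct me if this is wrong): for a contour Mat of type CV_32SC2 the points live in the rows and each x/y pair lives in the channels, so the flat data array is rows * cols * channels long.

// 75 points, 1 column, 2 channels -> data32S.length === 75 * 1 * 2 === 150
const pt = cv.matFromArray(1, 1, cv.CV_32SC2, [10, 20]); // a single point as a 1x1, 2-channel Mat
console.log(pt.rows, pt.cols, pt.channels(), pt.data32S); // 1 1 2 Int32Array(2) [10, 20]
pt.delete();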

Left channel extraction / fade out on WAV file in Javascript

I get a WAV file from a user upload (basically a file input) and have to do some manipulation with it:
Validate that it's a valid .wav file
If the user uploaded a stereo file, extract a single channel (the left one)
Add a fade out at the end (the last 50 samples of the file)
My first thought was: hey, there's an API for that (Web Audio), so I did something similar to:
const source = audioContext.createBufferSource();
const splitter = audioContext.createChannelSplitter(audioBuffer.numberOfChannels);
const gainNode = audioContext.createGain();
source.buffer = audioBuffer;
source.connect(splitter);
gainNode.gain.linearRampToValueAtTime(0, audioBuffer.duration);
splitter.connect(gainNode, 0);
Which, in my thinking, takes the first channel out of the source and adds a linear fade out (not really on the last 50 samples, but that's not the point for now).
But...
How do I extract the output of that into a file? I know how to play the manipulated sound on the frontend, but can I turn it back into a file?
At some point I assumed there's no way to do that, so I came up with a different solution that uses low-level file manipulation and goes as follows:
const audioContext = new AudioContext();
// const arrayBuffer = await toArrayBuffer(file);
const audioBuffer = await decodeAudio(audioContext, arrayBuffer);
const channels = 1;
const duration = audioBuffer.duration;
const rate = audioBuffer.sampleRate;
const length = Math.ceil(duration * rate * channels * 2 + 44);
const buffer = new ArrayBuffer(length);
const view = new DataView(buffer);
let position = 0;
let offset = 0;
const setUint16 = (data) => {
    view.setUint16(position, data, true);
    position += 2;
};
const setUint32 = (data) => {
    view.setUint32(position, data, true);
    position += 4;
};
setUint32(0x46464952); // RIFF
setUint32(length - 8); // file length
setUint32(0x45564157); // WAV
setUint32(0x20746d66); // fmt
setUint32(16); // data size
setUint16(1); // PCM
setUint16(channels);
setUint32(rate);
setUint32(rate * 16 * channels);
setUint16(channels * 2);
setUint16(16);
setUint32(0x61746164); // "data"
setUint32(length - position - 4);
const leftChannel = audioBuffer.getChannelData(0);
let sample;
console.log('left', leftChannel);
console.log('length', length);
while (position < length) {
    sample = leftChannel[offset];
    sample = sample < 0 ? sample * 0x8000 : sample * 0x7FFF;
    view.setInt16(position, sample, true);
    position += 2;
    offset++;
}
console.log('buffer', buffer);
const blob = new Blob([buffer], { type: file.type });
but it seems to have a lot of flaws: the output is distorted / has a different sample rate and so on...
My questions then would be:
How do I extract a file out of the Web Audio API, if that's even possible? Because that would be the best way to do it, imho.
If (1) is not possible, what am I doing wrong in the second approach?
WebAudio has no way of saving audio to a file. You can use MediaRecorder as one way, but I don't think it's required to support WAV files. Or you can do it by hand as you show above. At a glance I don't see anything wrong with what you've done. It might be helpful to look at what Chrome does to save files in its test suite; it does basically what you do.
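If you do want to try the MediaRecorder route, a rough sketch would look like the following (note that browsers typically record to webm/opus rather than WAV, so you would still need a conversion step afterwards):

// Sketch: record the end of the processing graph instead of encoding by hand.
const dest = audioContext.createMediaStreamDestination();
gainNode.connect(dest); // connect whatever the last node of your chain is
const recorder = new MediaRecorder(dest.stream);
const chunks = [];
recorder.ondataavailable = (e) => chunks.push(e.data);
recorder.onstop = () => {
    const blob = new Blob(chunks, { type: recorder.mimeType }); // usually audio/webm, not WAV
    // hand the blob to a download link or upload it from here
};
source.onended = () => recorder.stop();
recorder.start();
source.start();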

How can I pass a geometry to the Map function in GEE?

I am trying to use the map() function in Google Earth Engine to clip an ImageCollection to a geometry. I have multiple areas of interest (AOIs), so I would like to apply a generic clip function multiple times (once per AOI). However, when I create a function with two parameters (the image and the geometry) to map over, I get the error "image.clip is not a function". When using the function with just one parameter (the image), it works just fine, but then I need to write two (or possibly more) functions to do exactly the same task (i.e. clipping an image to a certain geometry).
I have tried the solutions in this post, but they do not work.
Any ideas on how to solve this issue?
Code:
// Get NTL data
var ntl = ee.ImageCollection("NOAA/VIIRS/DNB/MONTHLY_V1/VCMSLCFG");
// Define start and end year
var startYear = 2015;
var endYear = 2018;
// Filter montly and select 'avg_rad' band
var ntlMonthly = ntl.filter(ee.Filter.calendarRange(startYear, endYear, 'year'))
  .filter(ee.Filter.calendarRange(1, 12, 'month'))
  .select(['avg_rad']);
// Create geometries of AOIs
// -- Create a geometry of Venezuela
var geomVenezuela = ee.Geometry.Rectangle([-73.341258, 13.363291, -59.637555, -0.372893]);
// -- Create a geometry of Caracas (Venezuela's capital)
var geomCaracas = ee.Geometry.Rectangle([-67.062383, 10.558489, -66.667078, 10.364908]);
// Functions to crop to Venezuela (nationwide) and Caracas (local) resp.
var clipImageToGeometry = function(image, geom) {
  return image.clip(geom);
}
// Apply crop function to the ImageCollection
var ntlMonthly_Venezuela = ntlMonthly.map(clipImageToGeometry.bind(null, geomVenezuela));
var ntlMonthly_Caracas = ntlMonthly.map(clipImageToCaracas.bind(null, geomCaracas));
// Convert ImageCollection to single Image (for exporting to Drive)
var ntlMonthly_Venezuela_image = ntlMonthly_Venezuela.toBands();
var ntlMonthly_Caracas_image = ntlMonthly_Caracas.toBands();
// Check geometry in map
Map.addLayer(geomCaracas, {color: 'red'}, 'planar polygon');
Map.addLayer(ntlMonthly_Caracas_image);
// Store scale (m. per pixel) in variable
var VenezuelaScale = 1000;
var CaracasScale = 100;
// Export the image, specifying scale and region.
Export.image.toDrive({
  image: ntlMonthly_Caracas_image,
  description: 'ntlMonthly_Caracas_'.concat(startYear, "_to_", endYear),
  folder: 'GeoScripting',
  scale: CaracasScale,
  fileFormat: 'GeoTIFF',
  maxPixels: 1e9
});
If I understood your question correctly:
If you want to crop each image in the ImageCollection to a geometry, you could just do
var ntlMonthly_Venezuela = ntlMonthly.map(function(image){return ee.Image(image).clip(geomVenezuela)})
And just repeat for other AOIs.
If you want to wrap it in a function:
var clipImageCollection = function(ic, geom){
  return ic.map(function(image){return ee.Image(image).clip(geom)});
}
// Apply crop function to the ImageCollection
var ntlMonthly_Venezuela = clipImageCollection(ntlMonthly, geomVenezuela);
var ntlMonthly_Caracas = clipImageCollection(ntlMonthly, geomCaracas);
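As an aside, the "image.clip is not a function" error in your original attempt most likely comes from the argument order: clipImageToGeometry.bind(null, geomVenezuela) binds the geometry to the first parameter (image), so inside the function image is actually the geometry. If you prefer the bind approach, putting the geometry parameter first should also work; a minimal sketch:

// Hypothetical variant of your helper with the bound argument first
var clipToGeom = function(geom, image) {
  return ee.Image(image).clip(geom);
};
var ntlMonthly_Venezuela = ntlMonthly.map(clipToGeom.bind(null, geomVenezuela));
var ntlMonthly_Caracas = ntlMonthly.map(clipToGeom.bind(null, geomCaracas));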

Clipping one layer to the boundaries of another layer on google earth engine

So I'm trying to clip one layer (NDVI) to the boundaries of corn fields across Ontario. This is the code I have so far, but it doesn't seem to be working. I'm not sure if you can actually clip layers to other layers; I know you can for image collections, but some input on how to solve this would be great. Thanks for the help.
var landcover_crops = ee.ImageCollection("AAFC/ACI")
// Load a collection of Landsat TOA reflectance images.
var landsatCollection = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA');
// The dependent variable we are modeling.
var dependent = 'NDVI';
// The number of cycles per year to model.
var harmonics = 1;
// Make a list of harmonic frequencies to model.
// These also serve as band name suffixes.
var harmonicFrequencies = ee.List.sequence(1, harmonics);
// Function to get a sequence of band names for harmonic terms.
var constructBandNames = function(base, list) {
  return ee.List(list).map(function(i) {
    return ee.String(base).cat(ee.Number(i).int());
  });
};
// Construct lists of names for the harmonic terms.
var cosNames = constructBandNames('cos_', harmonicFrequencies);
var sinNames = constructBandNames('sin_', harmonicFrequencies);
// Independent variables.
var independents = ee.List(['constant', 't'])
  .cat(cosNames).cat(sinNames);
// Function to mask clouds in Landsat 8 imagery.
var maskClouds = function(image) {
  var score = ee.Algorithms.Landsat.simpleCloudScore(image).select('cloud');
  var mask = score.lt(10);
  return image.updateMask(mask);
};
// Function to add an NDVI band, the dependent variable.
var addNDVI = function(image) {
  return image
    .addBands(image.normalizedDifference(['B5', 'B4'])
    .rename('NDVI'))
    .float();
};
// Function to add a time band.
var addDependents = function(image) {
  // Compute time in fractional years since the epoch.
  var years = image.date().difference('2017-01-01', 'year');
  var timeRadians = ee.Image(years.multiply(2 * Math.PI)).rename('t');
  var constant = ee.Image(1);
  return image.addBands(constant).addBands(timeRadians.float());
};
// Function to compute the specified number of harmonics
// and add them as bands. Assumes the time band is present.
var addHarmonics = function(freqs) {
  return function(image) {
    // Make an image of frequencies.
    var frequencies = ee.Image.constant(freqs);
    // This band should represent time in radians.
    var time = ee.Image(image).select('t');
    // Get the cosine terms.
    var cosines = time.multiply(frequencies).cos().rename(cosNames);
    // Get the sin terms.
    var sines = time.multiply(frequencies).sin().rename(sinNames);
    return image.addBands(cosines).addBands(sines);
  };
};
// Filter to the area of interest, mask clouds, add variables.
var harmonicLandsat = landsatCollection
  .filterBounds(geometry2)
  .map(maskClouds)
  .map(addNDVI)
  .map(addDependents)
  .map(addHarmonics(harmonicFrequencies));
// The output of the regression reduction is a 4x1 array image.
var harmonicTrend = harmonicLandsat
  .select(independents.add(dependent))
  .reduce(ee.Reducer.linearRegression(independents.length(), 1));
// Turn the array image into a multi-band image of coefficients.
var harmonicTrendCoefficients = harmonicTrend.select('coefficients')
  .arrayProject([0])
  .arrayFlatten([independents]);
// Compute fitted values.
var fittedHarmonic = harmonicLandsat.map(function(image) {
  return image.addBands(
    image.select(independents)
      .multiply(harmonicTrendCoefficients)
      .reduce('sum')
      .rename('fitted'));
});
// Plot the fitted model and the original data at the ROI.
print(ui.Chart.image.series(fittedHarmonic.select(['fitted', 'NDVI']),
    geometry2, ee.Reducer.mean(), 100)
  .setOptions({
    title: 'Harmonic model: original and fitted values',
    lineWidth: 1,
    pointSize: 3,
  }));
// Pull out the three bands we're going to visualize.
var sin = harmonicTrendCoefficients.select('sin_1');
var cos = harmonicTrendCoefficients.select('cos_1');
// Do some math to turn the first-order Fourier model into
// hue, saturation, and value in the range[0,1].
var magnitude = cos.hypot(sin).multiply(5);
var phase = sin.atan2(cos).unitScale(-Math.PI, Math.PI);
var val = harmonicLandsat.select('NDVI').reduce('mean');
// Turn the HSV data into an RGB image and add it to the map.
var seasonality = ee.Image.cat(phase, magnitude, val).hsvToRgb();
Map.centerObject(geometry2, 11);
Map.addLayer(seasonality, {}, 'Seasonality');
Map.addLayer(geometry2, {}, 'corn_ndvi');
//need to change the image collection into a single image;
var crop2017 = landcover_crops
  .filter(ee.Filter.date('2017-01-01', '2017-12-31')) //select for 2017 data
  .first(); //collapses the data
//find what the band names are, so we can filter the data on the right one
var bandNames = crop2017.bandNames();
print('Band names: ', bandNames);
//then need to select the band of interest... here there's only one band, called 'landcover'
var crop2017_data = crop2017.select('landcover');
//then create various masks by selecting on the land cover value
var urban_mask = crop2017_data.eq(34); //creating the mask
var urban = crop2017_data.mask(urban_mask); //masking the data
var corn_mask = crop2017_data.eq(147);
var corn = crop2017_data.mask(corn_mask);
var soy_mask = crop2017_data.eq(158);
var soy = crop2017_data.mask(soy_mask);
var hay_mask = crop2017_data.eq(122);
var hay = crop2017_data.mask(hay_mask);
var grassland_mask = crop2017_data.eq(110);
var grassland = crop2017_data.mask(grassland_mask);
//Finally, add the masks to the map to make sure they are right
Map.addLayer(urban,undefined,'Urban');
Map.addLayer(corn,undefined,'Corn');
Map.addLayer(soy,undefined,'Soy');
Map.addLayer(hay,undefined,'Hay');
Map.addLayer(grassland,undefined,'Grassland');
//Can clip the mask to show just ontario
var crop2017_ontario = crop2017.clip(ontario);
var corn_ontario = corn.clip(ontario);
var soy_ontario = soy.clip(ontario);
var urban_ontario = urban.clip(ontario);
Map.addLayer(crop2017_ontario,undefined,'All Crops Ontario');
Map.addLayer(corn_ontario,undefined,"Corn Ontario");
Map.addLayer(soy_ontario,undefined,'Soy Ontario');
Map.addLayer(urban_ontario,undefined,'Urban Ontario');
// Composite an image collection and clip it to a boundary.
// Clip to the output image to the Nevada and Arizona state boundaries.
var clipped = seasonality.clipToCollection(corn_ontario);
// Display the result.
Map.setCenter(-80.24, 43.54);
Map.addLayer(clipped, 'clipped composite');
If you would like to clip the NDVI image to the actual bounds of the corn image, you can use var clipped = ndvi.clip(corn.geometry()). Every image has a geometry object associated with it, and the clip function takes a geometry as input.
If you only want to keep the NDVI values where the corn is, then you should use var masked = ndvi.updateMask(corn). This will only keep NDVI pixels that are also corn pixels.
I tried running your code, but it seems that some geometries are not available in what you posted, so it was difficult to identify exactly what was wrong. I hope this helps! If not, just send the link to the working code and point out which line was causing the issue.
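Applied to the variable names in your script, that would look roughly like this (assuming seasonality is the NDVI-derived image you want restricted to corn):

// Keep only the seasonality/NDVI pixels that are also corn pixels
var seasonalityCorn = seasonality.updateMask(corn_ontario);
// Or simply crop to the footprint of the corn image
var seasonalityClipped = seasonality.clip(corn_ontario.geometry());
Map.addLayer(seasonalityCorn, {}, 'Seasonality (corn only)');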

Showing cropped image in bokeh

I am showing a picture in a figure in bokeh and am using the BoxSelectTool in order to draw a rectangle.
box_select = BoxSelectTool(callback=callback)
p2 = figure(x_range=(0,700), y_range=(0,500),plot_width=1100,plot_height=1100,tools=[box_select])
p2.image_url( url='url',
x=1, y=1, w=700, h=500, anchor="bottom_left",source=im_src)
rect_source = ColumnDataSource(data=dict(x=[], y=[], width=[], height=[]))
callback = CustomJS(args=dict(rect_source=rect_source), code="""
    // get data source from Callback args
    var data = rect_source.data;
    /// get BoxSelectTool dimensions from cb_data parameter of Callback
    var geometry = cb_data['geometry'];
    /// calculate Rect attributes
    var width = geometry['x1'] - geometry['x0'];
    var height = geometry['y1'] - geometry['y0'];
    var x = geometry['x0'] + width/2;
    var y = geometry['y0'] + height/2;
    /// update data source with new Rect attributes
    data['x'].push(x);
    data['y'].push(y);
    data['width'].push(width);
    data['height'].push(height);
    rect_source.data = data;
    rect_source.change.emit();
""")
Now I want to show that image region as cropped in a different, smaller figure, after the rectangle is drawn, without clicking a button or anything:
d2 = figure(x_range=(0,200), y_range=(0,100),plot_width=200,plot_height=100)
d2.image( image='image',
x=1, y=1, dw=100, dh=100, source=img)
img = ColumnDataSource( data=dict(image=[]))
So I need something like this in JS:
tmp_im = cv2.imread('static/' + str(im_nr) + '.jpg')
tmp_im = tmp_im[geometry['y0']:geometry['y1'],geometry['x0']:geometry['x1']]
tmp_im = cv2.cvtColor(tmp_im, cv2.COLOR_BGR2GRAY)
img.data = dict(image=[tmp_im])
How can I do that in JS + bokeh?
I suggest using the holoviews module (part of the PyViz ecosystem) for this task, which provides high-level access to bokeh.
Holoviews provides so-called streams, which can be used together with DynamicMaps to generate dynamic figures based on the (changing) values of the stream.
The panel module (also part of the PyViz ecosystem) can be used to define layouts for visualization.
import numpy as np
import holoviews as hv
from holoviews import opts
from holoviews.streams import BoundsXY
import panel as pn
pn.extension() # loading the panel extension for use with notebook
opts.defaults(opts.Image(tools=['box_select'])) # making sure, that box_select is available
minval, maxval = 0, 200
# x-y data
ls = np.linspace(minval, 10, maxval)
xx, yy = np.meshgrid(ls, ls)
# z-data, e.g. intensity
zz = xx*yy
# min and max, later used to recalibrate the colormapping
zzmin = zz.min()
zzmax = zz.max()
bounds=(0,0, 1,1) # bounds used for the image
im = hv.Image(zz, bounds=bounds)
# stream, xy-data are provided by the box_select-tool
# As start values the same bounds as for the image are used.
box = BoundsXY(bounds=bounds)
# The box-stream is used to draw a rectangle dynamically
# based on the current selection using the box_select-tool.
rect = hv.DynamicMap(
    lambda bounds: hv.Bounds(bounds),
    streams=[box])
# The box-stream is used to draw an image dynamically
# based on the current selection using the box_select-tool.
im_select = hv.DynamicMap(
    lambda bounds: im[bounds[0]:bounds[2], bounds[1]:bounds[3]],
    streams=[box])
# Arranging the layout.
# With redim.range we make sure the colormapping uses the original min- and max-values as in 'im',
# and not the min- and max-values from 'im_select'.
layout = pn.Row(im * rect \
+\
im_select.redim.range(z=(zzmin, zzmax)))
layout.app()
