I am making a load testing program using K6, JavaScript in which I want to request an API sequentially based on VU id.
For example, let's suppose I have 5 different payloads and 5 VUs, one for each payload.
I want my program to run such that for the 1st minute only VU-1 executes, requesting continuously with my 1st payload; then for the next minute VU-2 executes, and so on until the 5th, after which VU-1 executes again.
Based on the VU Id I will be changing the payload.
I have tried setTimeout() method of JavaScript but not working with k6.
Also I have tried the scenarios option from the k6 documentation, but it runs in parallel. Here is the code snippet I tried.
// k6 options: each named scenario must live DIRECTLY under `scenarios` —
// the extra `execution` wrapper in the original is not a valid scenario
// config and is why the snippet did not behave as intended. Also, the key
// that selects the executor is `executor`, not `type` (consistent with the
// working sample further down).
export let options = {
  scenarios: {
    scenario1: {
      executor: "per-vu-iterations",
      vus: 1,
      iterations: 1,
      maxDuration: "15s",
      exec: "scenario1", // name of the exported function to run
    },
    scenario2: {
      executor: "per-vu-iterations",
      vus: 1,
      iterations: 1,
      maxDuration: "15s",
      exec: "scenario2",
    },
  },
};
// Scenario body k6 runs for each iteration of `scenario1`.
export function scenario1() {
  const response = http.get("https://test.loadimpact.com/");
  // Log which virtual user executed this iteration.
  console.log(__VU);
  const expectations = {
    "is status 200": (r) => r.status === 200,
  };
  check(response, expectations);
}
// Scenario body k6 runs for each iteration of `scenario2`.
export function scenario2() {
  const response = http.get("https://test.loadimpact.com/");
  // Log which virtual user executed this iteration.
  console.log(__VU);
  const expectations = {
    "is status 200": (r) => r.status === 200,
  };
  check(response, expectations);
}
Is there any method in k6 to achieve this?
One solution for this problem I found is using the Scenarios option with the executor and startTime parameters. But the issue I am facing with this solution is that I am not able to view the metrics results for the individual scenarios.
If there is any way to display the metrics do share.
This is a sample code for sequential running.
export let options = {
  scenarios: {
    // Runs first: one VU hitting scenario1 for the first minute.
    scenario_1: {
      executor: "constant-vus",
      vus: 1,
      duration: "1m",
      exec: "scenario1",
      tags: { test_type: 'api-test1' },
    },
    // Runs second: `startTime` delays this scenario until scenario_1 has
    // finished — this is what makes the run sequential instead of parallel.
    scenario_2: {
      executor: "constant-vus",
      vus: 1,
      duration: "1m",
      exec: "scenario2",
      startTime: "1m",
      tags: { test_type: 'api-test2' },
    },
  },
  // Declaring a threshold on a tagged sub-metric makes k6 track and print
  // that sub-metric separately in the end-of-test summary — this is how you
  // get metrics broken out per scenario (the `max>=0` criterion is a no-op
  // that can never fail; it exists only to surface the sub-metric).
  thresholds: {
    'http_req_duration{test_type:api-test1}': ['max>=0'],
    'http_req_duration{test_type:api-test2}': ['max>=0'],
  },
};
// k6 exec target for scenario_1: fetch the page and dump the full response.
export function scenario1() {
  const response = http.get("https://example.com/");
  console.log(JSON.stringify(response));
}
// k6 exec target for scenario_2: fetch the page and dump the full response.
export function scenario2() {
  const response = http.get("https://example.com/");
  console.log(JSON.stringify(response));
}
Related
I have a Cypress test that uses stubbed responses with cy.intercept. The requests that we're intercepting are polling an endpoint in our back end - we make one request per second until a status property in the response has changed.
I'm pretty new to Cypress so I might have the wrong idea about what you can actually test, but what I'd like to check is how often a request is made to this endpoint, i.e. assert that the polling is done at the correct rate (once/sec).
Is this possible to do with Cypress? Or should I perhaps look into some other tool?
This is how we're stubbing the network calls (simplified):
// Catch-all stub for GET /api/user: any request not consumed by a more
// specific intercept (see the { times: 2 } one below) receives the final
// "UPDATED" payload, which is what eventually stops the app's polling.
cy.intercept(
{
method: 'GET',
path: '/api/user',
},
{
body: {
id: '1',
status: 'UPDATED', // This is the status that eventually stops the polling
// etc.
},
}
).as('getUserUpdated');
// Stub only the first two polls ({ times: 2 }) with "CREATED" so the app
// keeps polling. Cypress checks the most recently registered intercept
// first, so this one wins until its two uses are spent.
cy.intercept(
{
method: 'GET',
path: '/api/user',
times: 2,
},
{
body: {
id: '1',
status: 'CREATED',
// etc.
},
}
).as('getUserCreated');
Here's a simple script that polls the api for approx 10 seconds at 1 second intervals
<script>
  // Poll the endpoint once per second...
  const pollTimer = setInterval(() => {
    fetch('/api/user')
  }, 1000)
  // ...and stop polling after roughly 10 seconds.
  setTimeout(() => {
    clearInterval(pollTimer)
  }, 10_000)
</script>
If I want to test that with cy.intercept(), I would basically use the callback form of intercept and inside that record the time.
// Record the elapsed time since the previous poll, then stub the response.
cy.intercept('/api/user', req => {
polls.push(Date.now() - last)
last = Date.now() // update the last time to record time between polls
req.reply({ id: '1', status: 'UPDATED'})
})
But the first interval time is distorted by the cy.visit() page load, so a second cy.intercept() can catch and discard it - but you could just .slice() the polls array too.
Note the order of setting up cy.intercept() - the last one set up is the first one checked by Cypress (then it expires after 1 catch because of {times:1}).
// Baseline timestamp and the list of measured gaps between polls.
let last = Date.now()
const polls = []
// Main interceptor: records the interval since the previous request and
// replies with the stub. Registered FIRST so it is checked LAST — Cypress
// tries the most recently registered intercept first.
cy.intercept('/api/user', req => {
polls.push(Date.now() - last)
last = Date.now()
req.reply({ id: '1', status: 'UPDATED'})
})
// This is just to consume the first call
// since the `cy.visit()` distorts the interval
cy.intercept('/api/user', { times: 1 }, req => {
last = Date.now()
req.reply({ id: '1', status: 'UPDATED'})
})
cy.visit('/');
// Let the app poll ~10 times, then assert each measured gap is one second
// within a 50 ms tolerance.
cy.wait(10_000)
.then(() => {
cy.wrap(polls)
.each(poll => {
expect(poll).to.be.closeTo(1000, 50) // best check is +/- 50 ms
})
})
Given I have the following configuration:
// Polling configuration as delivered by the server: each entry names a
// resource and how often (in ms) it should be requested.
let config = {
  "requests": [
    { "resource": "foo", "interval": 1000 },
    { "resource": "bar", "interval": 500 },
    { "resource": "baz", "interval": 3000 },
    { "resource": "qux", "interval": 500 },
    { "resource": "zot", "interval": 500 },
  ],
  // other configs...
}
I need to make a recursive setTimeout calls where I check what resources should be requested from a server at a given call and make the request to the server.
For example, considering the array above:
After 500ms, since it's the smallest interval, I have to make a request and pass array of ['bar', 'qux', 'zot']. After another 500ms, and since it's already 1000ms, the array should be ['bar', 'qux', 'zot', 'foo']. After another 500 it should be again ['bar', 'qux', 'zot']. When reaches 3000 - ['bar', 'qux', 'zot', 'foo', 'baz'], and so on...
The configuration itself is coming from a server when the app starts and it's a black box to me. Meaning I can't know exactly what intervals may be configured and how many of them are there. Here I made them increase by 500ms for convenience only (though I think I might make it a technical requirement to the back-end guys, lets assume I can't).
I'm not sure how to tackle this problem. I thought maybe I should store an array of required intervals and do something with that array. Perhaps store a current request as an object with timestamp and interval. Something like that:
// Collect the distinct intervals present in the config, sorted ascending.
const { requests } = config
let intervals = new Set()
for (let obj of requests) {
intervals.add(obj.interval)
}
intervals = Array.from(intervals).sort((a, b) => a - b) //[500, 1000, 3000]
// Sketch only: currentRequest / makeRequest / initRequest are placeholders
// for the scheduling logic being asked about.
let currentRequest
const makeRequest = resources => {
// send request to server with given resources array
// init the next request by calling initRequest again
}
const initRequest = () => {
const interval = 500 // just for example, the actual interval should be determined here
const resources = [] // Here I should determine what should be the requested resources
setTimeout(() => {
makeRequest(resources)
}, interval)
}
initRequest()
But I'm not sure about the logic here. How can this be done? Any ideas, please?
We create an upcoming array, which stores the next timestamp at which each resource should be retrieved. Then, we know that we need to set a timeout that triggers at the time of the next upcoming timestamp.
When the timeout triggers, we execute upcoming items that are scheduled to run right now. We then calculate the next time it should be run.
Finally, we set a new timeout to coincide with the next time a task is scheduled to run.
// Same polling configuration, one property per line for readability.
const config = {
  requests: [
    {
      resource: 'foo',
      interval: 1000
    },
    {
      resource: 'bar',
      interval: 500
    },
    {
      resource: 'baz',
      interval: 3000
    },
    {
      resource: 'qux',
      interval: 500
    },
    {
      resource: 'zot',
      interval: 500
    }
  ]
};
// When true, every resource also fires immediately at t = 0.
let fireAllAtTimeZero = false;
// Virtual clock: timestamp (ms) of the tick currently being processed.
let t = 0;
// Schedule table: for each resource, the next timestamp it is due.
let upcoming = config.requests.map(
  i => ({ ...i, next: fireAllAtTimeZero ? 0 : i.interval }));
// Fix from review: the original assigned `f` without declaring it — an
// implicit global, which throws a ReferenceError in strict mode / ES modules.
const f = () => {
  // Everything due exactly at the current tick.
  let now = upcoming.filter(i => i.next === t);
  console.log(`time = ${t}, doing: ${now.map(i => i.resource).join()}`);
  // initiate your async request here for every item in 'now' array
  now.forEach(i => i.next = t + i.interval);
  // Next wake-up is the smallest scheduled timestamp.
  let next = Math.min(...upcoming.map(i => i.next));
  let delay = next - t;
  t = next;
  // Reschedule forever; this demo never stops on its own.
  setTimeout(f, delay);
};
f();
You could use setTimeouts:
// Chain one setTimeout per configured interval, consuming the (assumed
// sorted) `intervals` array built earlier, then stop.
// Fixes from review: `initRequests` is now declared (the original assigned
// an undeclared name, which throws in strict mode / ES modules), and the
// index no longer reads past the end of `intervals`, which used to schedule
// a final timeout with an `undefined` delay.
const initRequests = () => {
  let i = 0;
  const resources = []; // placeholder carried over from the original sketch
  const timeoutHandler = () => {
    if (i < intervals.length) {
      setTimeout(timeoutHandler, intervals[i++]);
    }
  };
  timeoutHandler();
};
I currently have the below code setup to retrieve some data from a MongoDB database and I would like to make an IF function that runs on the documents received. I am trying to have an IF function that runs if the price field in the documents are 10% higher than event.payload.base_price otherwise move on, I am fairly new to coding and MongoDB so can't figure this one out as I am not sure how to write this function. Any help would be incredibly appreciated.
// Query the listings collection for documents matching the contract/id pair
// parsed into idSplit, print each match, and always close the client.
async function run() {
  try {
    const filter = { contract: idSplit[1], id: idSplit[2] };
    const findOptions = {
      sort: { name: 1 },
      projection: { _id: 0, slug: 1, contract: 1, id: 1, price: 1 },
    };
    const resultCursor = listings.find(filter, findOptions);
    await resultCursor.forEach(console.dir);
  } finally {
    // Release the connection whether the query succeeded or not.
    await client.close();
  }
}
run().catch(console.dir);
After you get the array in the cursor, you can use the below if condition in the forEach loop:
// For every listing, compare its price against base_price plus 10% and log
// which side of the threshold it falls on.
cursor.forEach((element) => {
  const threshold = event.payload.base_price + event.payload.base_price * 0.1;
  if (element.price > threshold) {
    console.log("Event is happening", element.price);
  } else {
    console.log("Event is not happening", element.price)
  }
});
In the project that I am working on, built using nodejs & mongo, there is a function that takes in a query and returns set of data based on limit & offset provided to it. Along with this data the function returns a total count stating all the matched objects present in the database. Below is the function:
// options carry the limit & offset values
// mongoQuery carries a mongo matching query
// Kick off the page query and the count over the same cursor and resolve
// when both have finished, yielding [dataSet, count].
function findMany(query, options, collectionId) {
  const sharedCursor = getCursorForCollection(collectionId).find(query, options);
  const pending = [findManyQuery(sharedCursor), countMany(sharedCursor)];
  return Promise.all(pending);
}
Now the problem with this is sometime when I give a large limit size I get an error saying:
Uncaught exception: TypeError: Cannot read property '_killCursor' of undefined
At first I thought I might have to increase the pool size in order to fix this issue but after digging around a little bit more I was able to find out that the above code is resulting in a race condition. When I changed the code to:
// Sequential version: run the page query first, then the count over the same
// cursor, finally resolving with [dataSet, count].
// Fixed from the original post: the outer .then callback was missing its
// closing brace (`);` instead of `});`), which is a syntax error; the
// redundant Promise.resolve wrapper was also dropped — returning a plain
// value from .then resolves the chain just the same.
function findMany(query, options, collectionId) {
  const cursor = getCursorForCollection(collectionId).find(query, options);
  return findManyQuery(cursor).then((dataSet) => {
    return countMany(cursor).then((count) => {
      return [dataSet, count];
    });
  });
}
Everything started working perfectly fine. Now, from what I understand with regard to Promise.all was that it takes an array of promises and resolves them one after the other. If the promises are executed one after the other how can the Promise.all code result in race condition and the chaining of the promises don't result in that.
I am not able to wrap my head around it. Why is this happening?
Since I have very little information to work with, I made an assumption of what you want to achieve and came up with the following using Promise.all() just to demonstrate how you should use Promise.all (which will resolve the array of promises passed to it in no particular order. For this reason, there must be no dependency in any Promise on the order of execution of the Promises. Read more about it here).
// A simple function to sumulate findManyQuery for demo purposes
// A simple function to simulate findManyQuery for demo purposes.
// Resolves with { dataset } when a cursor is supplied, otherwise rejects
// with a plain error object (same contract as the original).
function findManyQuery(cursors) {
  if (cursors) {
    return Promise.resolve({ dataset: cursors });
  }
  return Promise.reject({ error: 'No cursor in findManyQuery function' });
}
// A simple function to sumulate countMany for demo purposes
// A simple function to simulate countMany for demo purposes.
// Resolves with { count } when a cursor is supplied, otherwise rejects
// with a plain error object (same contract as the original).
function countMany(cursors) {
  if (cursors) {
    return Promise.resolve({ count: cursors.length });
  }
  return Promise.reject({ error: 'No cursor in countMany' });
}
// A simple function to sumulate getCursorForCollection for demo purposes
// A simple function to simulate getCursorForCollection for demo purposes:
// stands in for a real cursor by filtering a fixed table of documents on
// collectionId.
function getCursorForCollection(collectionId) {
  const fakeCollection = [
    { id: 1, language: 'Javascript', collectionId: 99 },
    { id: 2, language: 'Dart', collectionId: 100 },
    { id: 3, language: 'Go', collectionId: 100 },
    { id: 4, language: 'Swift', collectionId: 99 },
    { id: 5, language: 'Kotlin', collectionId: 101 },
    { id: 6, language: 'Python', collectionId: 100 },
  ];
  return fakeCollection.filter((row) => row.collectionId === collectionId);
}
// Demo findMany: narrows the simulated cursor to documents whose
// collectionId matches query.id and whose language appears in `options`,
// then resolves the dataset and its count in parallel.
function findMany(query = { id: 1 }, options = [], collectionId = 0) {
  // Simulated use of the query/options parameters (demo only).
  const matchesQuery = (doc) =>
    doc.collectionId === query.id && options.indexOf(doc.language) !== -1;
  // Arrays stand in for real cursors here, so filter replaces find.
  const cursors = getCursorForCollection(collectionId).filter(matchesQuery);
  // NOTE: pass the result of findManyQuery() to countMany() if you want the
  // total count of the resulting dataset.
  return Promise.all([findManyQuery(cursors), countMany(cursors)]);
}
// Consuming the findMany function with test parameters
const query = { id: 100 };
const collectionId = 100;
// Only documents whose language appears in this list survive the filter.
const options = ['Javascript', 'Python', 'Go'];
findMany(query, options, collectionId).then(result => {
console.log(result); // Result would be [ { dataset: [ [Object], [Object] ] }, { count: 2 } ]
}).catch((error) => {
console.log(error);
});
There are ways to write this function in a "pure" way for scalability and testing.
So here's your concern:
In the project that I am working on, built using nodejs & mongo, there is a function that takes in a query and returns set of data based on limit & offset provided to it. Along with this data the function returns a total count stating all the matched objects present in the database.
Note: You'll need to take care of edge case.
const Model = require('path/to/model');
// Promise wrapper around model.find that resolves with the page of data and
// its length, or rejects with the driver error.
// Fixes and notes from review:
//  - `reject(error)` now returns, so the resolve() below is no longer
//    reached after a rejection (previously `data.length` would throw on an
//    undefined `data`, even though the promise was already settled).
//  - NOTE(review): `total` is just the length of the returned page, not the
//    total matching count in the collection — confirm that is intended.
//  - NOTE(review): chaining .skip()/.limit() after passing a callback to
//    find() may have no effect, since supplying the callback typically
//    executes the query immediately — verify against your ODM's docs.
function findManyUsingPromise(model, query = {}, offset = 0, limit = 10) {
  return new Promise((resolve, reject) => {
    model.find(query, (error, data) => {
      if (error) {
        reject(error);
        return;
      }
      resolve({
        data,
        total: data.length || 0
      });
    }).skip(offset).limit(limit);
  });
}
// Call function: fetch the first 40 documents matching an empty query.
findManyUsingPromise(Model, {}, 0, 40).then((result) => {
// Do something with result {data: [object array], total: value }
}).catch((err) => {
// Do something with the error
});
I'm trying to create an observable that produces values from a number of asynchronous actions (http requests from a Jenkins server), that will let a subscriber know once all the actions are completed. I feel like I must be misunderstanding something because this fails to do what I expect.
'use strict';
let Rx = require('rx');
let _ = require('lodash');
// Demo payload: the three objects the observable below should emit.
let values = [
{'id': 1, 'status': true},
{'id': 2, 'status': true},
{'id': 3, 'status': true}
];
// Builds an Observable that emits each entry of `values` after a 1s timeout.
// BUG (the point of this post): onCompleted() runs synchronously, long before
// any setTimeout callback fires, so the subscriber completes and disposes
// before a single onNext is delivered.
function valuesObservable() {
return Rx.Observable.create(function(observer) {
_.map(values, function(value) {
var millisecondsToWait = 1000;
setTimeout(function() { // just using setTimeout here to construct the example
console.log("Sending value: ", value);
observer.onNext(value)
}, millisecondsToWait);
});
console.log("valuesObservable Sending onCompleted");
observer.onCompleted()
});
}
// Observer with handlers for data, error and completion events.
let observer = Rx.Observer.create((data) => {
console.log("Received Data: ", data);
// do something with the info
}, (error) => {
console.log("Error: ", error);
}, () => {
console.log("DONE!");
// do something else once done
});
// Subscribing starts the (buggy) sequence defined above.
valuesObservable().subscribe(observer);
Running this, I get output:
valuesObservable Sending onCompleted
DONE!
Sending value: { id: 1, status: true }
Sending value: { id: 2, status: true }
Sending value: { id: 3, status: true }
While what I would like to see is something more like:
Sending value: { id: 1, status: true }
Received Data: { id: 1, status: true }
Sending value: { id: 2, status: true }
Received Data: { id: 2, status: true }
Sending value: { id: 3, status: true }
Received Data: { id: 3, status: true }
valuesObservable Sending onCompleted
DONE!
I don't actually care about the order of the items in the list, I would just like the observer to receive them.
I believe what is happening is that Javascript asynchronously fires the timeout function, and proceeds immediately to the observer.onCompleted() line. Once the subscribing observer receives the onCompleted event (is that the right word?), it decides that it's done and disposes of itself. Then when the async actions complete and the observable fires onNext, the observer no longer exists to take any actions with them.
If I'm right about this, I'm still stumped about how to make it behave in the way I would like. Have I stumbled into an antipattern without realising it? Is there a better way of approaching this whole thing?
Edit:
Since I used setTimeout to construct my example, I realised I can use it to partially solve my problem by giving the observable a timeout.
// Workaround version: delay onCompleted with its own, longer timeout so the
// value emissions get a chance to fire first. Fragile, as the post notes:
// depending on the chosen timeout you either miss late values or wait far
// longer than necessary for completion.
function valuesObservable() {
return Rx.Observable.create(function(observer) {
let observableTimeout = 10000;
setTimeout(function() {
console.log("valuesObservable Sending onCompleted");
observer.onCompleted();
}, observableTimeout);
_.map(values, function(value) {
let millisecondsToWait = 1000;
setTimeout(function() {
console.log("Sending value: ", value);
observer.onNext(value)
}, millisecondsToWait);
});
});
}
This gets me all of the information from the observable in the order I want (data, then completion) but depending on the choice of timeout, I either may miss some data, or have to wait a long time for the completion event. Is this just an inherent problem of asynchronous programming that I have to live with?
Yes there is a better way. The problem right now is that you are relying on time delays for your synchronization when in fact you can use the Observable operators to do so instead.
The first step is to move away from directly using setTimeout. Instead use timer
Rx.Observable.timer(waitTime); // emits once after waitTime ms, then completes
Next you can lift the values array into an Observable such that each value is emitted as an event by doing:
Rx.Observable.from(values); // emits each array element as its own event
And finally you would use flatMap to convert those values into Observables and flatten them into the final sequence. The result being an Observable that emits each time one of the source timers emits, and completes when all the source Observables complete.
// Each source value becomes a timer; flatMap merges all the timers and the
// result selector restores the original value when its timer fires.
Rx.Observable.from(values)
.flatMap(
// Map the value into a stream
value => Rx.Observable.timer(waitTime),
// This function maps the value returned from the timer Observable
// back into the original value you wanted to emit
value => value
)
Thus the complete valuesObservable function would look like:
// Emits each input value after `waitTime` ms and completes once every timer
// has fired — no manual onCompleted bookkeeping required.
// Fixed from the original post: the do() handler referenced `value`, which
// is not in scope there; the emitted item arrives as the handler's argument.
function valuesObservable(values) {
  return Rx.Observable.from(values)
    .flatMap(
      value => Rx.Observable.timer(waitTime), // NOTE(review): `waitTime` must be defined in an enclosing scope
      value => value
    )
    .do(
      x => console.log(`Sending value: ${x}`),
      null,
      () => console.log('Sending values completed')
    );
}
Note the above would work as well if you weren't using demo stream, i.e. if you had really http streams you could even simplify by using merge (or concat to preserve order)
// If the inputs are already Observables (e.g. real http streams), flattening
// alone is enough — the three forms below are equivalent:
Rx.Observable.from(streams)
.flatMap(stream => stream);
// OR
Rx.Observable.from(streams).merge();
// Or simply
Rx.Observable.mergeAll(streams);
The best way to construct an observable is to use the existing primitive and then a combination of the existing operators. This avoids a few headaches (unsubscription, error management etc.). Then Rx.Observable.create is certainly useful when nothing else fits your use case. I wonder if generateWithAbsoluteTime would fit.
Anyways, here the issue you run into is that you complete your observer before you send him data. So basically you need to come up with a better completion signal. Maybe :
complete x seconds after last value emitted if no new value is emitted
complete when a value is equal to some 'end' value
With thanks to #paulpdaniels, this is the final code that did what I wanted, including the calls to Jenkins:
'use strict';
let Rx = require('rx');
let jenkinsapi = require('jenkins'); // https://github.com/silas/node-jenkins/issues
// Connection settings for the Jenkins server and the job to inspect.
let jenkinsOpts = {
"baseUrl": "http://localhost:8080",
"options": {"strictSSL": false},
"job": "my-jenkins-job",
"username": "jenkins",
"apiToken": "f4abcdef012345678917a"
};
// NOTE(review): JSON.parse(JSON.stringify(...)) is a deep clone of the
// options object; passing jenkinsOpts directly would likely work as well.
let jenkins = jenkinsapi(JSON.parse(JSON.stringify(jenkinsOpts)));
// Returns an Observable that emits one job-info object for `jobName`;
// the tree selector restricts the payload to builds[number,url].
function jobInfoObservable(jenkins, jobName) {
let selector = {tree: 'builds[number,url]'};
// fromNodeCallback adapts the (err, result) callback API into an Observable
// factory; the trailing () invokes it immediately.
return Rx.Observable.fromNodeCallback(function(callback) {
jenkins.job.get(jobName, selector, callback);
})();
}
// Returns an Observable that emits each build descriptor ({number,url}) of
// the job's history as a separate event, by flattening the builds array.
function buildIDObservable(jenkins, jobName) {
return jobInfoObservable(jenkins, jobName).flatMap(function(jobInfo) {
return Rx.Observable.from(jobInfo.builds)
});
}
// Returns an Observable emitting one detailed build-info response per build
// in the job's history: each build ID fans out (flatMap) into its own
// jenkins.build.get call.
function buildInfoObservable(jenkins, jobName) {
let buildIDStream = buildIDObservable(jenkins, jobName);
// Restrict each response to the fields listed in the tree selector.
let selector = {'tree': 'actions[parameters[name,value]],building,description,displayName,duration,estimatedDuration,executor,id,number,result,timestamp,url'};
return buildIDStream.flatMap(function(buildID) {
return Rx.Observable.fromNodeCallback(function(callback) {
jenkins.build.get(jobName, buildID.number, selector, callback);
})();
});
}
// Terminal observer: logs each build-info payload, any error, and completion.
let observer = Rx.Observer.create((data) => {
console.log("Received Data: ", data);
// do something with the info
}, (error) => {
console.log("Error: ", error);
}, () => {
console.log("DONE!");
// do something else once done
});
// Kick off the whole pipeline for the configured job.
buildInfoObservable(jenkins, jenkinsOpts.job).subscribe(observer);
By relying on the Rx built-in operators I managed to avoid messing about with timing logic altogether. This is also much cleaner than nesting multiple Rx.Observable.create statements.