I am trying to use SNS located in São Paulo (sa-east-1) from a Lambda function (Node.js 8.10) in Ohio (us-east-2). This is the first time I have tried to use an AWS service located in another region. So far, this is what I am doing:
//init aws resources
const AWS = require('aws-sdk');
const sns = new AWS.SNS({apiVersion: '2010-03-31', region: 'sa-east-1'});
//promisify the AWS.SNS.createPlatformEndpoint method
const snsCreatePlatformEndpoint = params => new Promise(
(resolve, reject)=>{
sns.createPlatformEndpoint(params, function(error, data){
if (error) { reject(error); }
else { resolve(data); }
});
}
);
exports.handler = (awsEvent, context, callback) => {
//parse stuff in here
...
HandleToken(token, callback);
};
async function HandleToken(token, callback){
try{
let params = {
PlatformApplicationArn: process.env.PlatAppArn,
Token: token,
};
console.log('params:', params); // this prints as expected
let {EndpointArn} = await snsCreatePlatformEndpoint(params);
console.log('It should pass through here'); // it is not printed
//returns a success response
...
} catch (error) {
//returns an error response
...
}
}
I have set a really high timeout for my Lambda function: 5 minutes.
I have also tested the same code on a Lambda function located in São Paulo (sa-east-1), and it works there.
I have been receiving the following error on my client:
"Request failed with status code 504"
"Endpoint request timed out"
Question: How can I use SNS in another AWS region correctly?
You shouldn't need to do any special setup beyond setting the region.
E.g., I use the following pattern to send notifications from us-east-1 to Tokyo (ap-northeast-1):
// this lambda runs in us-east-1
let AWS = require("aws-sdk");
AWS.config.update({ region: "ap-northeast-1" }); // asia-pacific region
exports.handler = async (event, context) => {
var params = {
Message: 'my payload',
TopicArn: 'arn:aws:sns:ap-northeast-1:xxxxxx:tokyoSNS'
};
let SNS = new AWS.SNS({apiVersion: '2010-03-31'});
var data = await SNS.publish(params).promise();
// check if successful then return
}
No endpoints or other extras were set up. Are you required to run your Lambda in a VPC? That's the only complication I can think of at the moment.
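Applying the same idea to your createPlatformEndpoint call, here is a minimal sketch (not your exact code) that uses the SDK's built-in .promise() helper instead of the hand-rolled wrapper. It assumes the PlatAppArn environment variable from your question and that the token arrives on the event:
// Lambda in us-east-2 calling SNS in sa-east-1 (sketch; assumptions noted above)
const AWS = require('aws-sdk');
const sns = new AWS.SNS({ apiVersion: '2010-03-31', region: 'sa-east-1' });

exports.handler = async (event) => {
    const params = {
        PlatformApplicationArn: process.env.PlatAppArn, // from the question
        Token: event.token,                             // assumed event shape
    };
    const { EndpointArn } = await sns.createPlatformEndpoint(params).promise();
    console.log('EndpointArn:', EndpointArn);
    return { statusCode: 200, body: JSON.stringify({ EndpointArn }) };
};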
Related
AWS Lambda function sometimes times out without any error message when it calls S3 through the SDK.
When I call S3.getObject(), the Lambda function sometimes stalls until it times out, without any error message.
This only happens occasionally, even though I use the same parameters and the same code.
I already use try/catch; if something went wrong, the code would produce an error message, but it does not.
The point is that the Lambda function misbehaves only 'sometimes'.
Normally it works well, but then it suddenly stalls.
Please help me with this problem.
START RequestId: 2c1dcd43-41f6-5d95-a862-9adf3d267ecf Version: $LATEST
2020-07-30T07:45:22.009Z 2c1dcd43-41f6-5d95-a862-9adf3d267ecf INFO fileKeys : [ '701bcf80-ed63-46d4-9524-d38a14a74fe0.dwg' ]
2020-07-30T07:45:22.009Z 2c1dcd43-41f6-5d95-a862-9adf3d267ecf INFO getObject drawingFile 701bcf80-ed63-46d4-9524-d38a14a74fe0.dwg
2020-07-30T07:45:22.018Z 2c1dcd43-41f6-5d95-a862-9adf3d267ecf INFO before getObject
2020-07-30T07:45:22.020Z 2c1dcd43-41f6-5d95-a862-9adf3d267ecf INFO before createReadStream
2020-07-30T07:45:22.021Z 2c1dcd43-41f6-5d95-a862-9adf3d267ecf INFO before pipe
END RequestId: 2c1dcd43-41f6-5d95-a862-9adf3d267ecf
REPORT RequestId: 2c1dcd43-41f6-5d95-a862-9adf3d267ecf Duration: 30027.38 ms Billed Duration: 30000 ms Memory Size: 3008 MB Max Memory Used: 31 MB
2020-07-30T07:45:52.032Z 2c1dcd43-41f6-5d95-a862-9adf3d267ecf Task timed out after 30.03 seconds
Here is the code
exports.getObject = async (type, fileKey) => {
try {
const config = env[type]();
const s3 = new AWS.S3();
return new Promise((resolve, reject) => {
const fileName = `/tmp/${fileKey}`;
const fileStream = fs.createWriteStream(fileName, 'binary');
const s3Stream = s3
.getObject({
Bucket: config.origin,
Key: fileKey,
})
.createReadStream();
s3Stream
.on('error', function (err) {
console.error('s3Stream : ', err);
fileStream.end();
reject(err);
})
.on('data', (data) => {
// console.log('data stream...');
})
.on('end', () => {
console.log('s3Stream read end');
});
fileStream
.on('error', function (err) {
console.error('fileStream : ', err);
fileStream.end();
reject(err);
})
.on('close', function () {
console.log('fileStream Done.');
resolve('success');
});
s3Stream.pipe(fileStream)
.on('error', function (err) {
console.error('File Stream:', err);
reject(err);
})
.on('close', function () {
console.log('Pipe Done.');
});
});
} catch (error) {
throw (error);
}
};
It sounds like your Lambda function is connected to a mixture of private and public subnets.
When calling AWS services from inside a VPC, the function needs a network path to reach them. This can be obtained by either:
Connecting the Lambda function to private subnets and having a NAT Gateway in the public subnet(s), with the Route Table of the private subnets sending traffic destined for 0.0.0.0/0 to the NAT Gateway, or
Using VPC Endpoints within the VPC to allow direct connectivity to the AWS service(s).
If the Lambda function is connected to a public subnet, it cannot reach the Internet (Lambda ENIs have no public IP, and a public subnet routes 0.0.0.0/0 to the Internet Gateway rather than a NAT Gateway), so connections to AWS services will time out. The random behaviour might be related to which subnet the Lambda function happens to use for a given invocation.
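As a side note on debugging this: when the network path is broken, the SDK call simply hangs until the Lambda timeout with no error. A sketch of one way to surface the failure sooner is to set shorter HTTP timeouts on the S3 client (the values below are arbitrary examples):
const AWS = require('aws-sdk');

// Fail fast instead of hanging until the Lambda timeout (sketch; tune the values)
const s3 = new AWS.S3({
    httpOptions: {
        connectTimeout: 3000, // ms allowed to establish the connection
        timeout: 10000        // ms allowed for the whole request
    },
    maxRetries: 1
});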
What I'm trying to do is create a Lambda function that runs two commands on an EC2 instance. When I had trouble running this code in the Lambda function, I took the code out of the exports.handler() method and ran it as a standalone Node.js file on the same EC2 instance ('node app.js'), and it worked there.
exports.handler = async (event) => {
const AWS = require('aws-sdk')
AWS.config.update({region:'us-east-1'});
var ssm = new AWS.SSM();
var params = {
DocumentName: 'AWS-RunShellScript', /* required */
InstanceIds: ['i-xxxxxxxxxxxxxxxx'],
Parameters: {
'commands': [
'mkdir /home/ec2-user/testDirectory',
'php /home/ec2-user/helloWorld.php'
/* more items */
],
/* '<ParameterName>': ... */
}
};
ssm.sendCommand(params, function(err, data) {
if (err) {
console.log("ERROR!");
console.log(err, err.stack); // an error occurred
}
else {
console.log("SUCCESS!");
console.log(data);
} // successful response
});
const response = {
statusCode: 200,
ssm: ssm
};
return response;
};
I figured it could have been a permissions-related issue, but the Lambda is part of the same VPC as the EC2 instance.
You're trying to combine async/await with callbacks. That won't work in an AWS Lambda function handler in Node.js. The reason it works locally, or in a Node server, is that the process is still running when the function exits, so the callback still happens. In a Lambda, the Node process is gone as soon as the Lambda exits, and with an async handler (or Promises) that happens before the callback is able to fire.
Solution based on Jason's Answer:
const AWS = require('aws-sdk');
const ssm = new AWS.SSM();
exports.handler = async (event,context) => {
AWS.config.update({region:'us-east-1'});
const params = {
DocumentName: 'AWS-RunShellScript', /* required */
InstanceIds: ['i-xxxxxxxxxxxxxx'],
Parameters: {
'commands': [
'mkdir /home/ec2-user/testDirectory',
'php /home/ec2-user/helloWorld.php'
/* more items */
],
/* '<ParameterName>': ... */
}
};
const ssmPromise = new Promise ((resolve, reject) => {
ssm.sendCommand(params, function(err, data) {
if (err) {
console.log("ERROR!");
console.log(err, err.stack); // an error occurred
context.fail(err);
}
else {
console.log("SUCCESS!");
console.log(data);
context.succeed("Process Complete!");
} // successful response
});
});
console.log(ssmPromise);
const response = {
statusCode: 200,
ssm: ssm
};
return response;
};
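An even simpler variant (a sketch, not the code above) is to skip the hand-rolled Promise and await the SDK's .promise() helper directly, so the handler cannot return before the SSM call has finished:
const AWS = require('aws-sdk');
const ssm = new AWS.SSM({ region: 'us-east-1' });

exports.handler = async (event) => {
    const params = {
        DocumentName: 'AWS-RunShellScript',
        InstanceIds: ['i-xxxxxxxxxxxxxx'], // placeholder instance id from the question
        Parameters: {
            commands: [
                'mkdir /home/ec2-user/testDirectory',
                'php /home/ec2-user/helloWorld.php'
            ]
        }
    };
    // await keeps the invocation alive until SSM responds
    const data = await ssm.sendCommand(params).promise();
    console.log('SUCCESS!', data);
    return { statusCode: 200, commandId: data.Command.CommandId };
};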
I have developed a Google Cloud Function which calls an API hosted in Azure.
However, the function returns this error:
Error: function crashed. Details:
getaddrinfo ENOTFOUND https://bupanonproduction.azure-api.net https://bupanonproduction.azure-api.net:443
Below is the Google Cloud Function:
'use strict';
const http = require('https');
const host = 'https://bupanonproduction.azure-api.net';
exports.remaininglimits = (req, res) => {
// Call the API
callRemainingLimitsApi().then((output) => {
// Return the results from the API to API.AI
res.setHeader('Content-Type', 'application/json');
res.send(JSON.stringify({ 'speech': output, 'displayText': output }));
}).catch((error) => {
// If there is an error let the user know
res.setHeader('Content-Type', 'application/json');
res.send(JSON.stringify({ 'speech': error, 'displayText': error }));
});
};
function callRemainingLimitsApi () {
return new Promise((resolve, reject) => {
// Create the path for the HTTP request
let path = '/api/Values';
console.log('API Request: ' + host + path);
// Make the HTTP request to the Azure API
http.get({host: host, path: path, headers: {'Ocp-Apim-Subscription-Key':'0a6e2fa822ec4d7a821d7f286abb6990'}}, (res) => {
let body = ''; // var to store the response chunks
res.on('data', (d) => { body += d; }); // store each response chunk
res.on('end', () => {
// After all the data has been received parse the JSON for desired data
let response = JSON.parse(body);
let jasonString = JSON.stringify(response);
// Create response
let output = `Hi, your limit is ${jasonString}.`;
// Resolve the promise with the output text
console.log(output);
resolve(output);
});
res.on('error', (error) => {
reject(error);
});
});
});
}
When I use another public API, such as the one below, it returns the correct result to the cloud function:
https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=MSFT&apikey=demo
Any idea why the cloud function is not recognizing the Azure API URL?
-Alan-
I just found out that the host should be defined without the "https://" prefix. That fixed the problem. (I am using the Free Trial with $300 credit and am not sure whether that counts as a paid plan.)
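For reference, a sketch of the corrected call: the host is a bare hostname (the https module supplies the scheme and port 443), and the subscription key is replaced with a placeholder here:
'use strict';
const https = require('https');

const host = 'bupanonproduction.azure-api.net'; // no 'https://' prefix
const path = '/api/Values';

https.get({ host: host, path: path, headers: { 'Ocp-Apim-Subscription-Key': '<subscription key>' } }, (res) => {
    let body = '';
    res.on('data', (d) => { body += d; });
    res.on('end', () => console.log(body));
}).on('error', (err) => console.error(err));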
I implemented this AWS Lambda that receives events from Slack and responds back to Slack with a sentence, and I want to capture Slack's answer back in the Lambda to verify that the message arrived and was posted.
// Lambda handler
exports.handler = (data, context, callback) => {
switch (data.type) {
case "url_verification": verify(data, callback); break;
case "event_callback": process(data.event, callback); break;
default: callback(null);
}
};
// Post message to Slack - https://api.slack.com/methods/chat.postMessage
function process(event, callback) {
// test the message for a match and not a bot
if (!event.bot_id && /(aws|lambda)/ig.test(event.text)) {
var text = `<#${event.user}> isn't AWS Lambda awesome?`;
var message = {
token: ACCESS_TOKEN,
channel: event.channel,
text: text
};
var query = qs.stringify(message); // prepare the querystring
https.get(`https://slack.com/api/chat.postMessage?${query}`);
}
callback(null);
}
I want to know how I can get the response to my HTTPS request (the one Slack sends back to me) in my Lambda.
If I understood correctly, you want to wait for the result of your get query.
In your code, callback is called immediately and the Lambda finishes its execution.
To be able to wait for the response, you need to remove callback from its current position in the code and call it after the request has completed.
// Post message to Slack - https://api.slack.com/methods/chat.postMessage
function process(event, callback) {
// test the message for a match and not a bot
if (!event.bot_id && /(aws|lambda)/ig.test(event.text)) {
var text = `<#${event.user}> isn't AWS Lambda awesome?`;
var message = {
token: ACCESS_TOKEN,
channel: event.channel,
text: text
};
var query = qs.stringify(message); // prepare the querystring
https.get(`https://slack.com/api/chat.postMessage?${query}`, (res) => {
res.resume(); // drain the response so the request can complete
callback(null); // finish only after Slack has responded
}).on('error', (err) => callback(err));
} else {
callback(null); // no matching message: finish immediately (callback used to be here, unconditionally)
}
}
If you can, use request/request-promise to save some lines of code.
To get the http response in your Lambda Function you just need to wait for the response before calling the Lambda Callback.
E.g.:
var request = require('request-promise');
exports.handler = (event, context, callback) => {
request('https://somedomain.com').then((body) => {
//got the response body
callback(null, body);
});
}
It's the same idea if you're using the https module.
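For completeness, here is a sketch of the same pattern with the built-in https module, wrapping the GET in a Promise so the handler waits for the response body before returning (this assumes a plain text or JSON body):
const https = require('https');

// Promisified GET: resolves with the response body, rejects on request errors
function get(url) {
    return new Promise((resolve, reject) => {
        https.get(url, (res) => {
            let body = '';
            res.on('data', (chunk) => { body += chunk; });
            res.on('end', () => resolve(body));
        }).on('error', reject);
    });
}

exports.handler = async (event) => {
    const body = await get('https://somedomain.com');
    return body; // the HTTP response is now available to the Lambda
};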
This is the code I have for publishing to the topic (I have changed the target and topic ARNs for security reasons):
var AWS = require("aws-sdk");
var sns = new AWS.SNS();
var targetArn = 'arn:aws:sns:us-east-1:4363657289:endpoint/GCM/APP_NAME/3185sfdnfe283925sgSeaa0e';
var topicArn = 'arn:aws:sns:us-east-1:4363657289436:TOPIC_NAME';
var payload = {
GCM: {
data: {
message: "test"
}
}
};
payload.GCM = JSON.stringify(payload.GCM);
payload = JSON.stringify(payload);
var params= {
TopicArn: topicArn,
TargetArn: targetArn,
Message: payload,
MessageStructure: 'json'
};
var responsefromSNS = sns.publish(params , function(error, data) {
if (error) {
console.log("ERROR: " + error.stack);
}
else {
console.log("SENT DATA: " + JSON.stringify(data));
context.done(null, data);
}
});
console.log(responsefromSNS);
My issue is that I never see the log statements from either the if or the else block, and the push notification never reaches the mobile app. I have consulted both the AWS JavaScript SDK documentation and countless Stack Overflow posts about this, and nothing I have tried works. And I have given the Lambda function permission to publish to the topic.
---UPDATE-----
I have changed my code a bit and now it looks like this:
var AWS = require("aws-sdk");
AWS.config.update({region:'us-east-1'});
var topicarn = 'arn:aws:sns:us-east-1:927579412028:alexapushdemo';
var targetarn = 'arn:aws:sns:us-east-1:927579412028:endpoint/GCM/automation.home.visa.com.homeautomation/3af761b2-1955-34d8-b66a-85e232e0aa0e';
var payload = {
default: "test",
GCM: {
data: {
message: "test"
}
}
};
payload.GCM = JSON.stringify(payload.GCM);
payload = JSON.stringify(payload);
var sns = new AWS.SNS();
console.log('start of sns function')
sns.publish({
TargetArn: targetarn,
Message: payload,
MessageStructure: 'json'
}, function(err, data) {
if (err) {
console.log(err.stack);
// Notify Lambda that we are finished, but with errors
context.done(err, 'sns function finished with errors!');
return;
}
console.log('push sent');
console.log(data);
// Notify Lambda that we are finished
context.done(null, 'sns function finished!');
});
console.log('end of sns functions');
The error I get is:
ConfigError: Missing region in config
    at Request.VALIDATE_REGION (/node_modules/aws-sdk/lib/event_listeners.js:81:45)
    at Request.callListeners (/node_modules/aws-sdk/lib/sequential_executor.js:105:20)
    at callNextListener (/node_modules/aws-sdk/lib/sequential_executor.js:95:12)
    at /node_modules/aws-sdk/lib/event_listeners.js:75:9
    at finish (/node_modules/aws-sdk/lib/config.js:228:7)
    at /node_modules/aws-sdk/lib/config.js:268:9
    at resolveNext (/node_modules/aws-sdk/lib/credentials/credential_provider_chain.js:84:9)
    at /node_modules/aws-sdk/lib/credentials/credential_provider_chain.js:97:11
    at /node_modules/aws-sdk/lib/credentials.js:123:23
    at /node_modules/aws-sdk/lib/credentials/ec2_metadata_credentials.js:66:7
Why am I getting this even though I'm calling AWS.config.update?
iram,
If I take your exact code and paste it into a Lambda Node.js 4.3 function and execute a test from the Lambda Console, this is the result:
ERROR: InvalidParameter: Invalid parameter: TopicArn Reason: Both TopicArn and TargetArn specified. Use only one or the other
This means that in your params you need to comment out either TopicArn or TargetArn, or add some logic to determine whether the incoming payload contains an ARN for a target endpoint or for a topic.
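For example, a sketch of the corrected params keeping only the target endpoint (the same applies if you keep TopicArn instead; the variable names are the ones from your code):
// Use either TargetArn or TopicArn, never both
var params = {
    TargetArn: targetArn,   // or: TopicArn: topicArn
    Message: payload,
    MessageStructure: 'json'
};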
You could still have permissions issues between the Lambda execution role and SNS or CloudWatch Logs; however, regardless of whether you have permission to publish or to send logs to CloudWatch from your Lambda function, running a test from the console will always show some logging of what's going on.
Good luck.
Specify the region to be used by the AWS SDK like this:
const AWS = require('aws-sdk');
AWS.config.region = 'us-east-1';