Transferring SPL Token using @solana/web3.js - javascript

I am trying to transfer SPL tokens and am getting the following error from the call
mintToken.getOrCreateAssociatedAccountInfo(wallet.publicKey):
Error: Invalid seeds, address must fall off the curve
My wallet variable is an AnchorWallet.
toWallet is obtained via:
var toWallet = anchor.web3.Keypair.fromSecretKey(DEMO_TO_WALLET);
try {
  if (wallet) {
    const mintPublicKey = new anchor.web3.PublicKey("Token address");
    const mintToken = new Token(
      props.connection,
      mintPublicKey,
      TOKEN_PROGRAM_ID,
      toWallet
    );
    const fromTokenAccount = await mintToken.getOrCreateAssociatedAccountInfo(
      wallet.publicKey
    );
    const destPublicKey = new anchor.web3.PublicKey(toWallet);
    // Get the derived address of the destination wallet which will hold the custom token
    const associatedDestinationTokenAddr = await Token.getAssociatedTokenAddress(
      mintToken.associatedProgramId,
      mintToken.programId,
      mintPublicKey,
      destPublicKey
    );
    const receiverAccount = await props.connection.getAccountInfo(associatedDestinationTokenAddr);
    const instructions: anchor.web3.TransactionInstruction[] = [];
    if (receiverAccount === null) {
      instructions.push(
        Token.createAssociatedTokenAccountInstruction(
          mintToken.associatedProgramId,
          mintToken.programId,
          mintPublicKey,
          associatedDestinationTokenAddr,
          destPublicKey,
          wallet.publicKey
        )
      );
    }
    instructions.push(
      Token.createTransferInstruction(
        TOKEN_PROGRAM_ID,
        fromTokenAccount.address,
        associatedDestinationTokenAddr,
        wallet.publicKey,
        [],
        1
      )
    );
    const transaction = new anchor.web3.Transaction().add(...instructions);
    transaction.feePayer = wallet.publicKey;
    transaction.recentBlockhash = (await props.connection.getRecentBlockhash()).blockhash;
    const transactionSignature = await props.connection.sendRawTransaction(
      transaction.serialize(),
      { skipPreflight: true }
    );
    await props.connection.confirmTransaction(transactionSignature);

Please ensure that wallet.publicKey contains a valid value:
console.log(wallet.publicKey); // I think this might be an empty string
const fromTokenAccount = await mintToken.getOrCreateAssociatedAccountInfo(
  wallet.publicKey
);
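For example, a minimal guard before that call makes the failure mode explicit (a sketch; it assumes wallet comes from an Anchor wallet adapter and may be undefined until the user connects):

// Hypothetical guard: bail out early if the wallet is not connected yet,
// so getOrCreateAssociatedAccountInfo never receives an empty key.
if (!wallet || !wallet.publicKey) {
  console.error('Wallet not connected yet');
  return;
}
const fromTokenAccount = await mintToken.getOrCreateAssociatedAccountInfo(
  wallet.publicKey
);

Separately, note that new anchor.web3.PublicKey(toWallet) is passed a whole Keypair; toWallet.publicKey is probably what was intended there.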

Related

How to use trim on a fetched URL

How do I retrieve the verification code? I try to apply the following regex and trim, but an error message appears: "TypeError: Cannot read properties of undefined (reading 'trim')". I just want to extract the verification code from the message.
My code:
const checkInboxUrl = 'https://getnada.com/api/v1/inboxes/';
const getMessageUrl = 'https://getnada.com/api/v1/messages/html/';
const refreshMailboxUrl = 'https://getnada.com/api/v1/u/';

// fetch is assumed to be available (Node 18+, or via: const fetch = require('node-fetch'))
// sleep helper assumed by getMailbox below
const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));

/* eslint-disable no-unused-vars */
class Getnada {
  constructor() {
    this.email = '';
    this.verificationCode = '';
  }

  async getEmail(email = 'urmxhbwrz@getnada.com') {
    this.email = email;
    return this;
  }

  async getMailbox(pattern, sleepTime = 5000) {
    await sleep(sleepTime);
    const timestamp = Math.floor(new Date().getTime() / 1000);
    const refreshMailboxResponse = await fetch(refreshMailboxUrl + this.email + '/' + timestamp);
    const checkInboxResponse = await fetch(checkInboxUrl + this.email);
    const checkInboxJson = await checkInboxResponse.json();
    const getMessageResponse = await fetch(getMessageUrl + checkInboxJson.msgs[0].uid);
    const readInbox = await getMessageResponse.text();
    const regex = new RegExp(pattern);
    const verificationCodeMatch = regex.exec(readInbox);
    this.verificationCode = verificationCodeMatch[1].trim();
    console.log(verificationCodeMatch);
    return this;
  }
}

const getnada = new Getnada();

async function main() {
  console.log(await getnada.getEmail());
  console.log(await getnada.getMailbox());
}
main();
https://getnada.com/api/v1/messages/html/8lra5CwOQcHvja3mpQZgO7G5RPTS3W
To retrieve the verification code, you can try to change these lines:
  const regex = new RegExp(pattern);
  const verificationCodeMatch = regex.exec(readInbox);
  this.verificationCode = verificationCodeMatch[1].trim();
to:
  const verificationCodeMatch = pattern.exec(readInbox);
  this.verificationCode = verificationCodeMatch[0].trim();
And change this line too:
  console.log((await getnada.getMailbox()))
to:
  console.log((await getnada.getMailbox(/\b\d{6,6}\b/)));
The regex /\b\d{6,6}\b/ matches a run of exactly 6 digits, which is the verification code.
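One caveat worth adding to this: exec returns null when nothing matches, which is exactly what produced the original TypeError, so a small guard keeps the failure readable (a minimal sketch):

const verificationCodeMatch = pattern.exec(readInbox);
// exec returns null on no match; guard before calling trim
if (!verificationCodeMatch) {
  throw new Error('Verification code not found in message body');
}
this.verificationCode = verificationCodeMatch[0].trim();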

Puppeteer: Save data from a for loop in database

I'm web-scraping a site and managed to extract data in the for loop.
However, I don't know how I can save it to my MongoDB database, as I'm receiving the error ReferenceError: nameElement is not defined.
How can I save the results from my for loop as an object to my database?
const kclResults = [];

async function scrapeInfiniteScrollItems(page, scrollDelay = 10000) {
  try {
    const html = await page.content();
    const $ = cheerio.load(html);
    await page.evaluate(() => {
      let elements = $("[role='listitem']")
        .find("._2DX0iPG8PDF3Si_o5PlzIj")
        .toArray();
      for (i = 0; i < elements.length; i++) {
        $(elements[i]).click();
        const nameElement = $("[data-log-name='PersonName']").text();
        const emailElement = $("[data-log-name='Email']").text();
        const allElements = $("[aria-label='Contact information']").text();
        const officeLocation = $("[data-log-name='OfficeLocation']").text();
        const position = $("[data-log-name='Company']").text();
        const jobTitle = $("[data-log-name='JobTitle']").text();
        const departament = $("[data-log-name='Department']").text();
        console.log(
          `email: ${emailElement} name: ${nameElement} allElements: ${allElements} \n office location: ${officeLocation} \n position: ${position} \n jobTitle: ${jobTitle} \n departament: ${departament}`
        );
      }
    });
    let kclResult = new KingsDB({
      nameElement,
      emailElement,
      allElements,
      officeLocation,
      position,
      jobTitle,
      departament,
    });
    kclResults.push(kclResult);
    console.log(kclResults);
    kclResult.save();
    return kclResults;
  } catch (error) {
    console.log(error);
  }
}
You are declaring nameElement (and the other variables) inside the loop in the page.evaluate callback and trying to access them outside that scope.
Just build an array of results inside page.evaluate, return it, and iterate over it when writing to your DB. The code below should work:
const kclResults = [];

async function scrapeInfiniteScrollItems(page, scrollDelay = 10000) {
  try {
    const html = await page.content();
    const $ = cheerio.load(html);
    const resultArr = await page.evaluate(() => {
      let elements = $("[role='listitem']")
        .find("._2DX0iPG8PDF3Si_o5PlzIj")
        .toArray();
      const resultArr = [];
      for (let i = 0; i < elements.length; i++) {
        $(elements[i]).click();
        const nameElement = $("[data-log-name='PersonName']").text();
        const emailElement = $("[data-log-name='Email']").text();
        const allElements = $("[aria-label='Contact information']").text();
        const officeLocation = $("[data-log-name='OfficeLocation']").text();
        const position = $("[data-log-name='Company']").text();
        const jobTitle = $("[data-log-name='JobTitle']").text();
        const departament = $("[data-log-name='Department']").text();
        resultArr.push({
          nameElement,
          emailElement,
          allElements,
          officeLocation,
          position,
          jobTitle,
          departament
        });
        console.log(
          `email: ${emailElement} name: ${nameElement} allElements: ${allElements} \n office location: ${officeLocation} \n position: ${position} \n jobTitle: ${jobTitle} \n departament: ${departament}`
        );
      }
      return resultArr;
    });
    const kclResults = [];
    for (let result of resultArr) {
      const {
        nameElement,
        emailElement,
        allElements,
        officeLocation,
        position,
        jobTitle,
        departament
      } = result;
      let kclResult = new KingsDB({
        nameElement,
        emailElement,
        allElements,
        officeLocation,
        position,
        jobTitle,
        departament,
      });
      // push the record once and wait for the save, rather than also
      // pushing the save() promise into the same array
      kclResults.push(kclResult);
      await kclResult.save();
    }
    console.log(kclResults);
    return kclResults;
  } catch (error) {
    console.log(error);
  }
}
PS: The function passed to page.evaluate runs in the browser context and thus doesn't have access to your Node variables unless they are explicitly passed as arguments.
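For illustration, a minimal sketch of passing a Node-side value into the browser context (the names greeting and shouted are hypothetical):

// Values from Node must be serialized and passed explicitly as arguments:
const greeting = 'hello';                      // lives in Node
const shouted = await page.evaluate((msg) => {
  // runs in the browser; msg is a serialized copy of greeting
  return msg.toUpperCase();
}, greeting);
console.log(shouted); // 'HELLO'

The same restriction applies to $ itself: cheerio's $ exists only in Node, so inside page.evaluate, $ will only resolve if the target page ships its own jQuery-style $.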

[REQ_RESOURCE_TYPE]: The resource must be a string, Buffer or a valid file stream

I am trying to apply a greyscale effect to pictures, but when I run the command it shows me the error above.
Any fix?
The code:
const jimp = require('jimp');
const { MessageAttachment } = require('discord.js');

module.exports = {
  name: 'grey',
  run: async (client, message, args) => {
    if (message.attachments.size > 0) {
      let image = message.attachments.first().url;
      if (!image) return;
      let readedImage = await jimp.read(image);
      let sendedImage = await readedImage.greyscale();
      let attch = new MessageAttachment(sendedImage, 'Skyy.png');
      message.channel.send(attch);
    }
  }
}
You must convert the jimp image to a buffer before sending it:
const jimp = require('jimp');
const { MessageAttachment } = require('discord.js');

module.exports = {
  name: 'grey',
  run: async (client, message, args) => {
    if (message.attachments.size > 0) {
      let image = message.attachments.first().url;
      if (!image) return;
      let readedImage = await jimp.read(image);
      // changed line below: getBufferAsync needs the target MIME type
      let sendedImage = await readedImage.greyscale().getBufferAsync(jimp.MIME_PNG);
      let attch = new MessageAttachment(sendedImage, 'Skyy.png');
      message.channel.send(attch);
    }
  }
}
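Note that getBufferAsync takes the MIME type to encode to; jimp.MIME_PNG is used above to match the 'Skyy.png' attachment name, since jimp cannot infer the output format from the filename passed to MessageAttachment.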

Node: Loop over lines of a file, process each asynchronously, wait for each result?

I have a file of name and date-of-birth-info. For each line in the file, I need to submit the data to a web form and see what result I get. I'm using Node and Puppeteer (headless), as well as readline to read the file.
The code works fine for small files, but when I run it on the full 5000 names, or even a few hundred, I end up with hundreds of headless instances of Chromium, bringing my machine to its knees and possibly creating confounding timeout errors.
I'd prefer to wait for each form submission to complete, or otherwise throttle the processing so that no more than x names are in process at once. I've tried several approaches, but none does what I want. I'm not a JS whiz at all, so there's probably questionable design going on.
Any thoughts?
const puppeteer = require('puppeteer');
const fs = require('fs');
const readline = require('readline');

const BALLOT_TRACK_URL = 'https://www.example.com/ballottracking.aspx';
const VOTER_FILE = 'MailBallotsTT.tab';
const VOTER_FILE_SMALL = 'MailBallotsTTSmall.tab';
const COUNTY = 'Example County';

const checkBallot = async (fName, lName, dob, county) => {
  /* Initiate the Puppeteer browser */
  const browser = await puppeteer.launch({ headless: true });
  const page = await browser.newPage();
  await page.goto(BALLOT_TRACK_URL, { waitUntil: 'networkidle0' });
  // fill out the form
  await page.type('#ctl00_ContentPlaceHolder1_FirstNameText', fName);
  await page.type('#ctl00_ContentPlaceHolder1_LastNameText', lName);
  await page.type('#ctl00_ContentPlaceHolder1_DateOfBirthText', dob);
  await page.type('#ctl00_ContentPlaceHolder1_CountyDropDown', county);
  let pageData = await page.content();
  // Extract the results from the page
  try {
    const submitSelector = 'input[name="ctl00$ContentPlaceHolder1$RetrieveButton"]';
    const tableSelector = '#ctl00_ContentPlaceHolder1_ResultPanel > div > div > div > table > tbody > tr:nth-child(3) > td:nth-child(7) > div';
    const foundSubmitSelector = await page.waitForSelector(submitSelector, { timeout: 5000 });
    const clickResult = await page.click(submitSelector);
    const foundTable = await page.waitForSelector(tableSelector, { timeout: 5000 });
    let data = await page.evaluate((theSelector) => {
      let text = document.querySelector(theSelector).innerHTML.replaceAll('<br>', '').trim();
      /* Returning an object filled with the scraped data */
      return {
        text
      };
    }, tableSelector);
    return data;
  } catch (error) {
    return {
      text: error.message
    };
  } finally {
    browser.close();
  }
};

const mainFunction = () => {
  const readInterface = readline.createInterface({
    input: fs.createReadStream(VOTER_FILE_SMALL),
    output: null,
    console: false
  });
  readInterface.on('line', async (line) => {
    const split = line.split('\t');
    const fName = split[0];
    const lName = split[1];
    const dob = split[2];
    const checkResult = await checkBallot(fName, lName, dob, COUNTY);
    console.log(line + '\t' + checkResult.text);
    await new Promise(resolve => setTimeout(resolve, 5000));
  });
};
mainFunction();
Here is some code that implements my suggestion in the comment. I have used a setTimeout to represent the async code that you have, but in principle the approach should be easily adaptable:
// Source file testReadFileSync.js
const fs = require("fs");

const data = fs.readFileSync("./inputfile.txt", { encoding: "utf-8" });
const lines = data.split("\n");
console.log(`There are ${lines.length} lines to process`);

var currentLine = 0;

function main() {
  // Check if we have processed all the lines
  if (currentLine == lines.length) return;
  // get the current line number, then increment it
  let lineNum = currentLine++;
  // Process the line
  asyncProcess(lineNum, lines[lineNum]);
}

function asyncProcess(lineNum, line) {
  console.log(`Start processing line[${lineNum}]`);
  let delayMS = getRandomInt(100) * 10;
  setTimeout(function () {
    // ------------------------------------------------
    // this function represents all the async processing
    // for one line.
    // After async has finished, we call main again
    // ------------------------------------------------
    console.log(`Finished processing line[${lineNum}]`);
    main();
  }, delayMS);
}

function getRandomInt(max) {
  return Math.floor(Math.random() * Math.floor(max));
}

// Start four parallel async processes, each of which will process one line at a time
main();
main();
main();
main();
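Adapting that pattern to the question's code might look like the sketch below. It assumes the checkBallot function and constants from the question; CONCURRENCY, runAll, and worker are names introduced here:

const CONCURRENCY = 4; // at most 4 names in flight at once

async function runAll() {
  const lines = fs.readFileSync(VOTER_FILE_SMALL, 'utf-8').split('\n').filter(Boolean);
  let next = 0; // shared cursor; safe because JS callbacks run one at a time

  async function worker() {
    while (next < lines.length) {
      const line = lines[next++];
      const [fName, lName, dob] = line.split('\t');
      const checkResult = await checkBallot(fName, lName, dob, COUNTY);
      console.log(line + '\t' + checkResult.text);
    }
  }

  // start CONCURRENCY workers and wait for all of them to drain the list
  await Promise.all(Array.from({ length: CONCURRENCY }, () => worker()));
}
runAll();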

Nodejs Scraper isn't moving to next page(s)

Hey guys, this is a follow-on from my other question. I have created a Node.js scraper that doesn't seem to want to go through the pages; it stays on the first. My source code is below:
const rp = require('request-promise');
const request = require('request');
const otcsv = require('objects-to-csv');
const cheerio = require('cheerio');

// URL to scrape
const baseURL = 'xxx';
const searchURL = 'xxxx';

// scrape info
const getCompanies = async () => {
  // Pagination test
  for (let index = 1; index <= 20; index = index + 1) {
    const html = await rp.get(baseURL + searchURL + index);
    const $ = await cheerio.load(html);
    console.log("Loading Pages....");
    console.log("At page number " + index);
    // end pagination test

    //const htmls = await rp(baseURL + searchURL);
    const businessMap = cheerio('a.business-name', html).map(async (i, e) => {
      const link = baseURL + e.attribs.href;
      const innerHtml = await rp(link);
      const emailAddress = cheerio('a.email-business', innerHtml).prop('href');
      const name = e.children[0].data || cheerio('h1', innerHtml).text();
      const phone = cheerio('p.phone', innerHtml).text();
      return {
        // link,
        name,
        emailAddress: emailAddress ? emailAddress.replace('mailto:', '') : '',
        phone,
      };
    }).get();
    return Promise.all(businessMap);
  }
};

console.log("Finished Scraping.... Now Saving!");

// save to CSV
getCompanies()
  .then(result => {
    const transformed = new otcsv(result);
    return transformed.toDisk('./output.csv');
  })
  .then(() => console.log('Scrape Complete :D '));
As you can see, I have tried a few different ways to make this happen, so any help will be gratefully appreciated.
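One likely culprit, offered as a hedged observation: the return Promise.all(businessMap) sits inside the for loop, so getCompanies resolves after scraping only the first page. A sketch of a fix, accumulating results across pages and returning once the loop finishes (allBusinesses is a name introduced here):

const getCompanies = async () => {
  const allBusinesses = [];
  for (let index = 1; index <= 20; index++) {
    const html = await rp.get(baseURL + searchURL + index);
    console.log("At page number " + index);
    const businessMap = cheerio('a.business-name', html).map(async (i, e) => {
      // ... same per-business scraping as above ...
    }).get();
    // wait for this page's businesses, then keep looping instead of returning
    allBusinesses.push(...await Promise.all(businessMap));
  }
  return allBusinesses;
};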
