Since Microsoft is currently giving away 100GB + 100GB, and I've got some GBs free for a few years, I thought of using this space as media storage for files downloaded from my VPS.
So I started reading the OneDrive REST docs to authenticate myself using Node.js, and I did it successfully.
I'm able to:
Login and access whenever I want (using Access Tokens and Refresh Tokens)
Upload, edit, remove a file
Create, edit, remove a directory
List a directory content
All of this by using the REST API.
The problem is that, due to a restriction of the OneDrive REST API, I have to walk the directory tree to find a folder's contents (making N calls, one per level of depth) instead of requesting the desired directory in a single call.
I'm just trying to develop an ls-like command for OneDrive to complete my VPS-to-OneDrive sync project.
This is my code so far:
// `request` is the request module and `at` is a valid access token, both set up earlier.
function lsdir(path, originalpath, folder, callback) {
    var dirs = path.split("/");
    if (folder == null) {
        originalpath = path;
        // First call: resolve the SkyDrive root, then list its contents.
        request("https://apis.live.net/v5.0/me/skydrive?access_token=" + at, function(err, res, body) {
            var json = JSON.parse(body);
            var upload_location = json.upload_location;
            request(upload_location + "?access_token=" + at, function(err, res, body) {
                json = JSON.parse(body);
                if (originalpath == "/") {
                    callback(json, originalpath);
                    return;
                }
                for (var i in json.data) {
                    var currentFolder = json.data[i];
                    if (currentFolder.name == dirs[1]) {
                        if (dirs.length > 2) {
                            // Descend one level and recurse on the rest of the path.
                            lsdir(dirs.slice(2, dirs.length).join("/"), originalpath, currentFolder, callback);
                        } else {
                            callback(currentFolder, originalpath);
                            return;
                        }
                        break;
                    }
                }
            });
        });
    } else if (dirs.length > 0) {
        // Subsequent calls: list the current folder and keep walking the path.
        request(folder.upload_location + "/?access_token=" + at, function(err, res, body) {
            var json = JSON.parse(body);
            if (dirs.length == 1) {
                callback(json, originalpath);
            } else {
                for (var i in json.data) {
                    var currentFolder = json.data[i];
                    if (currentFolder.name == dirs[0]) {
                        lsdir(dirs.slice(1, dirs.length).join("/"), originalpath, currentFolder, callback);
                        break;
                    }
                }
            }
        });
    }
}
lsdir("/vpsSync/", null, null, function(folder, originalpath) {
console.log(("Directory contents of " + originalpath).bold);
console.log(folder.data.map(function(arr) {
if (arr.type == "folder") {
return arr.name + "/";
}
return arr.name;
}).join("\n"));
});
Is there a way to reduce the recursion (or remove it completely)? The Microsoft servers are really slow (3-5 seconds per request!)
Thank you all and enjoy the 215GB of OneDrive :)
You may want to consider using our just-announced API, which has the ability to do sync.
http://onedrive.github.io/items/view_changes.htm
If you start the sync operation from the point at which you want to begin traversing the hierarchy, and do not provide a token, it should return you a complete view. For larger hierarchies you will still need to make multiple requests (based on the values of "@changes.hasMoreChanges" and "@odata.nextLink"), but that will still be far fewer than one per folder.
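As a rough sketch using the request module already used in the question (the endpoint shape and response property names are taken from the linked page, so double-check them there), paging through the complete view might look like this:

// Hedged sketch: collect the complete view of a folder via view.changes,
// following @odata.nextLink while @changes.hasMoreChanges is true.
// `request` and the access token `at` are the same ones used in the question.
function fullView(url, items, done) {
    request({ url: url, headers: { Authorization: "bearer " + at } }, function (err, res, body) {
        if (err) return done(err);
        var json = JSON.parse(body);
        items = items.concat(json.value || []);
        if (json["@changes.hasMoreChanges"] && json["@odata.nextLink"]) {
            return fullView(json["@odata.nextLink"], items, done);   // fetch the next page
        }
        done(null, items);   // every item under the starting folder, in far fewer calls than one per folder
    });
}

// Start at the folder you care about, with no change token, to get the full hierarchy.
fullView("https://api.onedrive.com/v1.0/drive/root:/vpsSync:/view.changes", [], function (err, items) {
    if (err) return console.error(err);
    console.log(items.length + " items under /vpsSync");
});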
Related
I installed the rets-client package from npm.
I ran other queries and fetched the metadata, which works fine, but when I try to run the photo streaming example I keep getting this error:
Error: RetsReplyError: RETS Server reply while attempting getObject - ReplyCode 20403 (NO_OBJECT_FOUND); ReplyText: No Object Found [260978536:1].
I followed the code in the example:
https://github.com/sbruno81/rets-client#photo-streaming-example
// rets, fs, clientSettings and outputFields are set up earlier, as in the linked example.
try {
rets.getAutoLogoutClient(clientSettings, async (client) => {
const photoIds = {
'260978536': '*', // get all photos for listingId 260978536
};
const photoStream = await client.objects.stream.getObjects('Property', 'Photo', photoIds, {
alwaysGroupObjects: true,
ObjectData: '*'
});
console.log("========================================");
console.log("======== Photo Stream Results ========");
console.log("========================================");
return new Promise(function (resolve, reject) {
let i = 0;
photoStream.objectStream.on('data', function (event) {
try {
if (event.type === 'headerInfo') {
console.log(' ~~~~~~~~~ Header Info ~~~~~~~~~');
outputFields(event.headerInfo);
return
}
console.log(" -------- Photo " + (i + 1) + " --------");
if (event.type === 'error') {
console.log(" Error: " + event.error);
} else if (event.type === 'dataStream') {
outputFields(event.headerInfo);
const fileStream = fs.createWriteStream(
"/tmp/photo_" + event.headerInfo.contentId + "_" + event.headerInfo.objectId + "." + event.headerInfo.contentType.match(/\w+\/(\w+)/i)[1]);
event.dataStream.pipe(fileStream);
}
i++;
} catch (err) {
reject(err);
}
});
photoStream.objectStream.on('error', function (errorInfo) {
reject(errorInfo);
});
photoStream.objectStream.on('end', function () {
resolve();
});
})
})
} catch (errorInfo) {
const error = errorInfo.error || errorInfo;
console.log(" ERROR: issue encountered:");
outputFields(error);
console.log(' ' + (error.stack || error).replace(/\n/g, '\n '));
}
The reason I used that photo ID is that when I run a query I can see this listing ID has an L_PictureCount of 20, but somehow it's giving me "no object found".
Sample listing query result for the same ID:
{ L_Area: 'Islands-Van. & Gulf',
L_ListingID: '260978536',
L_Status: 'Expired',
L_PictureCount: '20',
L_Last_Photo_updt: '2015-07-15T04:27:00',
L_DisplayId: 'V1064230' }
Can someone please give me a hand with what I'm doing wrong here?
Thanks in advance for any help and suggestions.
P.S. I also tried using an L_ListingID whose L_Status is Active instead of Expired, but the result is the same.
The RETS server you're connecting to does not allow image downloads because it's a staging server and they want to keep the bandwidth low. You'll have to test your code against their production server, or ask the MLS to allow downloads from their staging environment.
Points to note while downloading images from a RETS server:
Ensure you have permission to access listing images.
Second, check whether you have image download access, or only public image URL access (CDN links). Depending on the RETS server, either one or both permissions will be given.
To download images/image URLs you need photo IDs. Either the "listingId" or the "listingKey" will work here, again depending on the RETS server, so try both.
You may have access to multiple image types, such as thumbnail, normal size, and high resolution. You can specify that as well in the "getObject" method (see the sketch after this list).
Once an image/image URL is downloaded, regularly cross-check the photo modification timestamp field to identify any modification to the image/image URL.
Some RETS servers provide image URLs as data via resources like Media, Tour, etc.
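As a hedged sketch of the points above, reusing the client, outputFields, and async callback from the question's code: the object type name ('Photo', 'Thumbnail', 'HiRes', ...) and whether objects are keyed by listing ID or listing key are server-specific, so the values below are placeholders to adapt to your server's metadata.

// Inside the same async (client) => callback as in the question.
const ids = {
    '260978536': '*'   // try the ListingKey value here instead, if the server keys objects by it
};
// Ask for a different object type; the available names come from the server's object metadata.
const thumbStream = await client.objects.stream.getObjects('Property', 'Thumbnail', ids, {
    alwaysGroupObjects: true,
    ObjectData: '*'
});
thumbStream.objectStream.on('data', (event) => {
    if (event.type === 'headerInfo') return outputFields(event.headerInfo);
    if (event.type === 'error') return console.log('  Error: ' + event.error);
    outputFields(event.headerInfo);   // 'dataStream' events carry the image itself, as in the question
});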
I've been struggling to automate and clean up how I handle sprite generation and loading in my HTML5 game, which uses a Node.js socket.io server to send an object containing the data needed to generate the sprites.
What I want to do to achieve this is read through a directory /img and all its subdirectories (/assets1, /assets2, /assets3, etc.) and create an object based on their data and structure. The problem I came across is that I couldn't find a nice way to handle the subdirectories of, say, /assets3. Here's how my assets are set up, as an example:
And here's the example object I want to end up with, which I haven't managed without endless if/elses; that honestly doesn't seem appealing to me, and there has got to be a better way, perhaps with a library.
var outputWeWant = {
assets1: {
img1: '/img/assets1/img1.png',
img2: '/img/assets1/img2.png',
},
assets2: {
img1: '/img/assets2/img1.png',
img2: '/img/assets2/img2.png',
},
assets3: {
img1: '/img/assets3/img1.png',
img2: '/img/assets3/img2.png',
assets4: {
img1: '/img/assets3/assets4/img1.png'
}
}
}
Below is a little brainstorming I did, but this isn't as effective as I'd like down the road, and it looks ugly to keep adding "is a directory" checks as soon as a new directory is added inside assets4:
fs.readdirSync('/img/').map(dirName => {
fs.readdirSync('/img/' + dirName).map(fileName => {
if (fs.statSync('/img/' + dirName + '/' + fileName).isDirectory()) {
// Read the new directory and add the files to our object
} else {
// It's not a directory, just add it to our object
}
});
});
This kind of arbitrarily deep traversal calls for a recursive function. I'm going to assume this function is to be written for Node, and I'll leave the filesystem details to the OP. This snippet should be treated as pseudo-code.
function parseDirectory(directory) {
return directory.getItems().reduce((out, item) => {
switch (item.type) {
case 'file':
out[item.name] = item.path;
break;
case 'directory':
out[item.name] = parseDirectory(item.path);
break;
}
return out;
}, {});
}
With the added fs code in the OP, here’s a (theoretically) working function:
function parseDirectory(directory) {
return fs.readdirSync(directory).reduce((out, item) => {
const itemPath = `${directory}/${item}`;
if (fs.statSync(itemPath).isDirectory()) {
out[item] = parseDirectory(itemPath);
} else {
out[item] = itemPath;
}
return out;
}, {});
}
Or, if the syntax of reduce() is too contrived for your liking, try this:
function parseDirectory(directory) {
let out = {};
fs.readdirSync(directory).forEach(item => {
const itemPath = `${directory}/${item}`;
if (fs.statSync(itemPath).isDirectory()) {
out[item] = parseDirectory(itemPath);
} else {
out[item] = itemPath;
}
});
return out;
}
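Assuming fs is required at the top of the file, usage against the question's /img tree is then a single call (note that the resulting keys will include file extensions, e.g. img1.png rather than img1):

const fs = require('fs');

// Builds the nested object in one pass over the directory tree.
const assets = parseDirectory('/img');
console.log(JSON.stringify(assets, null, 2));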
I'm trying to detect changes on the user's Drive to update the contents of a list, but when I request the changes I receive all entries on the drive.
To explain why I want to do this:
This is a handy feature when a user has two tabs open, one with the Google Drive environment and one with an application that uses the Drive (the app doesn't need to be reloaded to see content changes made in the Drive environment).
I'm following the guide listed here: https://developers.google.com/drive/v2/reference/changes/list
A strange thing is that I need largestChangeId+1, but how do I know this value? I do not know the largestChangeId, so I set it to null. No matter what I do with this value, I always get all the content.
I made the following code:
o.getCloudFileUpdates = function(fCallback, sFolderId )
{
var oDefQ = {q:'trashed=false '+(typeof sFolderId == 'string'?('and "'+sFolderId+'" in parents'):''), largestChangeId:null, orderby:'title', maxResults:1000},
fGetFiles = function(request, result)
{
request.execute(function(resp)
{
//$d(resp);
if( resp.items instanceof Array )
{
result = result.concat(resp.items);
if( resp.nextPageToken )
{ // Get next page and 'break' this function
return fGetFiles(gapi.client.drive.changes.list($j.extend(oDefQ,{'pageToken':resp.nextPageToken})), result);
}
}
if( result.length )
{
result.sort(function(a,b)
{
if (a.title < b.title)
{ return -1; }
if (a.title > b.title)
{ return 1; }
return 0;
});
fCallback(result);
return;
}
fCallback(false);
});
};
try {
fGetFiles(gapi.client.drive.changes.list(oDefQ), []);
} catch(e) { fCallback(false); return false; }
return true;
};
How do I get the latest changes without knowing the largestChangeId?
Start with zero on your first call. Within the response is the current largest change id, which you need to store and use on the next request. In your code, it will materialise as "resp.largestChangeId".
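For illustration, here is a minimal sketch of that loop, assuming the same gapi client as in the question and the startChangeId parameter from the v2 changes.list reference; the id is stored here in a plain variable, but it could equally go in localStorage:

var storedLargestChangeId = 0;   // persisted between polls

function pollChanges(callback) {
    var params = { maxResults: 1000 };
    if (storedLargestChangeId > 0) {
        // Only ask for changes newer than the ones we've already seen.
        params.startChangeId = Number(storedLargestChangeId) + 1;
    }
    gapi.client.drive.changes.list(params).execute(function (resp) {
        if (resp.largestChangeId) {
            storedLargestChangeId = resp.largestChangeId;   // remember for the next request
        }
        callback(resp.items || []);
    });
}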
Is there a better way to create several folders at once in Google Drive? I know I could run the createFolder() function three times, but this doesn't seem very efficient.
Right now I'm just running it like so:
createFolder("pen",id,function(){
createFolder("paper",id,function(){
createFolder("scripts",id,function(){
console.log(file)
})
})
});
Maybe I'm doing it wrong, but basically I'm organizing all generated files into folders, and maybe Google Drive intended this to be done differently. What do you guys/gals think?
You need to alter the flow to something similar to what's below:
function createFolders(folders, id, callback) {
    var remaining = folders.length;
    for (var i = 0; i < folders.length; i++) {
        createFolder(folders[i], id, function() {
            remaining--;
            if (remaining == 0) {
                // all folders have been created
                callback && callback();
            }
        });
    } // end of for
}
createFolders(["pen", "paper", "scripts"], id, function() {
// all jobs executed
});
If you are using the Google APIs JavaScript Client Lib, batch requests are supported: https://developers.google.com/api-client-library/javascript/features/rpcbatch
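For example (a hedged sketch, not tested): with the gapi JavaScript client loaded with Drive v2, the three folder creations could be sent as one batch. The batch helper names below (gapi.client.newBatch, batch.add, batch.then) are my assumption about the client library's batch support, so verify them against the linked page.

// Build one files.insert request per folder, all with the same parent.
var batch = gapi.client.newBatch();
["pen", "paper", "scripts"].forEach(function (name) {
    batch.add(gapi.client.drive.files.insert({
        resource: {
            title: name,
            mimeType: "application/vnd.google-apps.folder",
            parents: [{ id: id }]
        }
    }));
});
batch.then(function (result) {
    // result.result maps each request id to its individual response
    console.log(result);
});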
Using Node.js, I'm trying to do something quite similar to Meteor: I want to send only the parts of a page that actually changed. My dilemma is that I know how to build such a framework to respond to link clicks and send partial updates, but that approach doesn't cater to browsing directly to a page other than the index (which is required for search engines, and for people without JavaScript, to use your site).
I can also figure out how to make a framework that supports entire page reloads; Handlebars and a simple Node server instance would take care of that. However, I can't figure out how to create a way that lets me write one method to tell the framework the partial updates for a page and have the framework figure out what else needs to be loaded.
One way I can think of would be to build the index page every time (for entire page loads) and apply the partial updates to it, but that can quickly become expensive if a subpage differs a lot from a very crowded index.
An example method would look something like this:
function images(id) {
if (id == null) {
// load all images from database
template.images = db.result();
template.content = template.loadblock('gallery');
}
else {
// retrieve single image
template.content = template.loadblock('single_image');
template.image = db.result();
}
}
On a partial update, calling this method for domain.com/images would work just fine because it's clear what has changed.
For an entire page load, this function would miss things like the header, footer, navigation, etc.
In an answer I'm looking for an example of where this has been done, or some tips that can point me in the right direction. I'm sorry for any typos; I wrote this post on an iPad. If you have any questions about my question, just ask and I'll update as needed.
Update:
A possible example of a solution might be the following code. It's just to give an idea; it probably won't actually run.
// As a convention, don't pass around raw values if they aren't static, but pass around functions such as
data.images = function () {
    // run db query
    // return an object with the images
};

// This constraint might be limited to the index() method
var routes = {
    // This now allows us to do things like this:
    index: function() {
        var data = {};
        // Initialise everything needed for the index
        data.title = 'Index';
        data.nav = { Index: '/', Images: '/images' };
        data.content = 'Hello World';
        return data;
    },
    categories: function() {
        var data = {};
        data.content = render('gallery', function () { /* load and return images as object */ }); // Not sure about this dynamic subtemplating but oh well
        return data;
    }
};

// This now allows us to do the following:
function request(page, type) {
    if (type == 'update') {
        if (routes[page] != undefined && typeof routes[page] == 'function') {
            respond(routes[page]());
        }
    } else {
        if (routes[page] != undefined && typeof routes[page] == 'function') {
            var data = mergeArrays(routes['index'](), routes[page]());
            // data is then rendered through index.html, which is just a Handlebars template
            respond(data);
        }
    }
}
Here is a pattern I often use (in Express apps):
function respond(req, res, name, resource) {
if(req.accepts('json')) {
// Send JSON to clients who want it
res.send(resource);
} else {
// Render with layout only for non-XHR requests
resource.layout = !req.xhr;
res.render('resources/' + name, resource);
}
}
Example usage:
app.get('/images', function(req, res, next) {
getImages(function(err, images) {
if(err) return next(err);
respond(req, res, 'images', images);
});
});
app.get('/images/:id', function(req, res, next) {
getImage(req.params.id, function(err, image) {
if(err) return next(err);
respond(req, res, 'image', image);
});
});
image.jade:
img(src=uri, alt=title)
images.jade:
#gallery
for image in images
include image
Clients who ask for JSON get that; otherwise they get the full page only if it's a non-XHR request, while XHR requests get just the HTML snippet for the requested resource. This works well for quite simple apps, where resources mostly correspond to pages.
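On the client side, for instance, any library that sets the X-Requested-With header (which is what req.xhr checks) can fetch just the fragment. This is only an illustrative sketch with jQuery, and the selectors (a.gallery-link, #content) are hypothetical:

// Hypothetical client-side counterpart: swap the gallery in without a full reload.
$(document).on('click', 'a.gallery-link', function (e) {
    e.preventDefault();
    // jQuery sends X-Requested-With: XMLHttpRequest, so the server renders without the layout
    $.get('/images', function (html) {
        $('#content').html(html);
    });
});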