I have to add a sheet via the API. I am using the sheet.best API. It works fine: I can add a sheet, but I don't know how to add Sheet1, Sheet2, and so on.
function addsheet() {
  const ss = SpreadsheetApp.getActive();
  // Uses the Advanced Sheets service; "index: 1" inserts the new sheet at the second position.
  Sheets.Spreadsheets.batchUpdate(
    { requests: [{ addSheet: { properties: { index: 1, title: "myBullSheet" } } }] },
    ss.getId() // the spreadsheet ID
  );
}
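If you need several sheets (Sheet1, Sheet2, ...), you can put multiple addSheet requests into a single batchUpdate call. A minimal sketch (the sheet titles here are just examples):
function addSheets() {
  const ss = SpreadsheetApp.getActive();
  // One addSheet request per sheet to create; all are applied in a single call.
  const requests = ["Sheet1", "Sheet2"].map((title, i) => ({
    addSheet: { properties: { index: i + 1, title: title } },
  }));
  Sheets.Spreadsheets.batchUpdate({ requests: requests }, ss.getId());
}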
Reference: Sheets.Spreadsheets.batchUpdate
Don't forget to enable the Sheets API (Advanced Google services).
I have a short script to OCR JPG files by converting them to GDOCS. It works fine for JPGs around 5 MB, but for a 600 dpi scan, where the file size is more like 15 MB, I get the following error for a single image:
5:40:58 PM Notice Execution started
5:41:03 PM Error
GoogleJsonResponseException: API call to drive.files.insert failed with error: Request Too Large
convertJpegToGdoc # convertJpegToGdoc.gs:27
The relevant line of code is:
Drive.Files.insert({title: file.getName(), parents: [{id: dstFolderId}]}, file.getBlob(), {ocr: true});
I am aware of the quotas (Quotas for Google Services); the error I am getting is not one of those. The timestamps show that the script is not exceeding the 6 minutes listed in the docs. BTW, I can convert multiple images, each approx. 1.5 MB, with 24-character JPG file basenames, into gdocs without problems using this script.
The Google Drive API JavaScript example for inserting docs suggests, perhaps, that I may need to upgrade my code to handle larger files, but I am not sure where to start.
Any suggestions appreciated.
Full code:
// this function does OCR while copying from ocrSource to ocrTarget
function convertJpegToGdoc() {
  var files = DriveApp.getFolderById(srcFolderId).getFilesByType(MimeType.JPEG);
  while (files.hasNext()) {
    var file = files.next();
    Drive.Files.insert({title: file.getName(), parents: [{id: dstFolderId}]}, file.getBlob(), {ocr: true});
  }
  // this moves files from ocrSource to ocrScriptTrash
  // handy for file counting & keeping ocrSource free for next batch of files
  var inputFolder = DriveApp.getFolderById(srcFolderId);
  var processedFolder = DriveApp.getFolderById(trshFolderId);
  var files = inputFolder.getFiles();
  while (files.hasNext()) {
    var file = files.next();
    file.moveTo(processedFolder);
  }
}
I believe your goal is as follows.
You want to convert a JPEG to a Google Document with OCR using the Drive API.
When I saw your question, I remembered that I had experienced the same situation. At that time, even when a resumable upload was used, the same Request Too Large error could not be avoided. I suspected the file size, the image dimensions, the resolution, and so on as possible reasons, but I could not find a clear cause. So, in that case, I used the following workaround.
My workaround is to reduce the image size, which also reduces the file size; this removed the issue for me. In this answer, I would like to propose this workaround.
When your script is modified, it becomes as follows.
From:
Drive.Files.insert({title: file.getName(), parents: [{id: dstFolderId}]}, file.getBlob(), {ocr: true});
To:
try {
  Drive.Files.insert({ title: file.getName(), parents: [{ id: dstFolderId }] }, file.getBlob(), { ocr: true });
} catch ({ message }) {
  if (message.includes("Request Too Large")) {
    const link = Drive.Files.get(file.getId()).thumbnailLink.replace(/=s.+/, "=s2000");
    Drive.Files.insert({ title: file.getName(), parents: [{ id: dstFolderId }] }, UrlFetchApp.fetch(link).getBlob(), { ocr: true });
  }
}
In this modification, when the Request Too Large error occurs, the image size is reduced by modifying the thumbnail link. In this sample, the horizontal size becomes 2000 pixels while keeping the aspect ratio.
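If 2000 pixels is still too large for your scans, the same trick works with a smaller size. A hypothetical helper (not part of the original script) that wraps this logic:
// Hypothetical helper: returns a resized blob of a Drive image by rewriting
// the "=s" size parameter of its thumbnail link.
function getResizedBlob(fileId, size) {
  const link = Drive.Files.get(fileId).thumbnailLink.replace(/=s.+/, "=s" + size);
  return UrlFetchApp.fetch(link).getBlob();
}
// e.g. getResizedBlob(file.getId(), 1500)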
Note:
This modified script supposes that the Drive API has already been enabled at Advanced Google services. Please be careful about this.
Added:
Your script in your question is as follows.
// this function does OCR while copying from ocrSource to ocrTarget
function convertJpegToGdoc() {
  var files = DriveApp.getFolderById(srcFolderId).getFilesByType(MimeType.JPEG);
  while (files.hasNext()) {
    var file = files.next();
    Drive.Files.insert({title: file.getName(), parents: [{id: dstFolderId}]}, file.getBlob(), {ocr: true});
  }
  // this moves files from ocrSource to ocrScriptTrash
  // handy for file counting & keeping ocrSource free for next batch of files
  var inputFolder = DriveApp.getFolderById(srcFolderId);
  var processedFolder = DriveApp.getFolderById(trshFolderId);
  var files = inputFolder.getFiles();
  while (files.hasNext()) {
    var file = files.next();
    file.moveTo(processedFolder);
  }
}
In my answer, I proposed the following modification.
From:
Drive.Files.insert({title: file.getName(), parents: [{id: dstFolderId}]}, file.getBlob(), {ocr: true});
To:
try {
  Drive.Files.insert({ title: file.getName(), parents: [{ id: dstFolderId }] }, file.getBlob(), { ocr: true });
} catch ({ message }) {
  if (message.includes("Request Too Large")) {
    const link = Drive.Files.get(file.getId()).thumbnailLink.replace(/=s.+/, "=s2000");
    Drive.Files.insert({ title: file.getName(), parents: [{ id: dstFolderId }] }, UrlFetchApp.fetch(link).getBlob(), { ocr: true });
  }
}
But when I look at your current script, it is as follows.
// convertJpegToGdoc.js - script converts .jpg to .gdoc files
// Google Script Project - ocrConvert https://script.google.com/home/projects/1sDHfmK4H19gaLxxtXeYv8q7dql5LzoIUHto-OlDBofdsU2RyAn_1zbcr/edit
// clasp location C:\Users\david\Google Drive\ocrRollConversion
// Begin with empty folders (see below)
// Transfer a set of Electoral Roll .JPG from storage into ocrSource folder
// Running this script performs OCR conversions on the .JPG files
// .JPG files are converted to .GDOC & stored in ocrTarget
// The .JPG ocrSource files are transferred to ocrScriptTrash leaving the ocrSource folder empty if all goes well
// Uses Google Drive root folders (~\Google Drive\)
// 1. ocrSource
// 2. ocrTarget
// 3. ocrScriptTrash
// to check Id value open folder in Google Drive then examine URL
let srcFolderId = "###"; //ocrSource
let dstFolderId = "###"; //ocrTarget
let trshFolderId = "###"; //ocrScriptTrash

// this function does OCR while copying from ocrSource to ocrTarget (adjusted try/catch for larger jpgs)
function convertJpegToGdocRev1() {
  var files = DriveApp.getFolderById(srcFolderId).getFilesByType(MimeType.JPEG);
  try {
    Drive.Files.insert({ title: file.getName(), parents: [{ id: dstFolderId }] }, file.getBlob(), { ocr: true });
  } catch ({ message }) {
    if (message.includes("Request Too Large")) {
      const link = Drive.Files.get(file.getId()).thumbnailLink.replace(/=s.+/, "=s2000");
      Drive.Files.insert({ title: file.getName(), parents: [{ id: dstFolderId }] }, UrlFetchApp.fetch(link).getBlob(), { ocr: true });
    }
  }
  // this moves files from ocrSource to ocrScriptTrash
  // handy for file counting & keeping ocrSource free for next batch of files
  var inputFolder = DriveApp.getFolderById(srcFolderId);
  var processedFolder = DriveApp.getFolderById(trshFolderId);
  var files = inputFolder.getFiles();
  while (files.hasNext()) {
    var file = files.next();
    file.moveTo(processedFolder);
  }
}
Unfortunately, it seems that you miscopied my proposed answer. In your current script, the iterator from var files = DriveApp.getFolderById(srcFolderId).getFilesByType(MimeType.JPEG); is never used, so file is undefined inside the try block. Without the try-catch this would throw an error, but because of the try-catch the error is swallowed when you run the script. I think this is the reason for your current issue of "No result. No errors."
In order to use my proposed modification, please copy and paste the script correctly. With that done, the modified script is as follows.
Modified script:
Please enable the Drive API at Advanced Google services.
let srcFolderId = "###"; //ocrSource
let dstFolderId = "###"; //ocrTarget
let trshFolderId = "###"; //ocrScriptTrash

function convertJpegToGdocRev1() {
  var files = DriveApp.getFolderById(srcFolderId).getFilesByType(MimeType.JPEG);
  while (files.hasNext()) {
    var file = files.next();
    var name = file.getName();
    console.log(name); // You can see the file name in the log.
    try {
      Drive.Files.insert({ title: name, parents: [{ id: dstFolderId }] }, file.getBlob(), { ocr: true });
    } catch ({ message }) {
      if (message.includes("Request Too Large")) {
        const link = Drive.Files.get(file.getId()).thumbnailLink.replace(/=s.+/, "=s2000");
        Drive.Files.insert({ title: name, parents: [{ id: dstFolderId }] }, UrlFetchApp.fetch(link).getBlob(), { ocr: true });
      }
    }
  }
  var inputFolder = DriveApp.getFolderById(srcFolderId);
  var processedFolder = DriveApp.getFolderById(trshFolderId);
  var files = inputFolder.getFiles();
  while (files.hasNext()) {
    var file = files.next();
    file.moveTo(processedFolder);
  }
}
A few months ago I developed a NodeJS API to get embed reports from Power BI (using a tenant). I consume this API from an Angular app. Now I want to get the report filtered, and I don't know if this is possible with my current code.
I used the Power BI REST API to get the embed report. Reading Microsoft's docs, I see lots of pages like this one, which say that I should create an object with the filters I want. This is not a problem, but I don't know whether this is compatible with my current Node API or I should develop a new solution.
My API follows the sample provided by Microsoft, and the code is:
async function getEmbedParamsForSingleReport(workspaceId, reportId, additionalDatasetId) {
  const reportInGroupApi = `https://api.powerbi.com/v1.0/myorg/groups/${workspaceId}/reports/${reportId}`;
  const headers = await getRequestHeader();

  // Get report info by calling the Power BI REST API
  const result = await axios.get(reportInGroupApi, { headers });
  if (result.status !== 200) {
    throw result;
  }

  // Convert result to JSON to retrieve values
  const resultJson = result.data;

  // Add report data for embedding
  const reportDetails = new PowerBiReportDetails(resultJson.id, resultJson.name, resultJson.embedUrl);
  const reportEmbedConfig = new EmbedConfig();

  // Create mapping for report and embed URL
  reportEmbedConfig.reportsDetail = [reportDetails];

  // Create list of datasets
  let datasetIds = [resultJson.datasetId];

  // Append additional dataset to the list to achieve dynamic binding later
  if (additionalDatasetId) {
    datasetIds.push(additionalDatasetId);
  }

  // Get embed token for multiple resources
  reportEmbedConfig.embedToken = await getEmbedTokenForSingleReportSingleWorkspace(reportId, datasetIds, workspaceId);

  return reportEmbedConfig;
}
With this I obtain the embed report and send it back to my app. Is this solution compatible with filters?
Thanks in advance!
Finally, I came up with a solution. In my Angular app, I use the powerbi-client-angular library, which allows me to define some configuration for the embedded report:
basicFilter: models.IBasicFilter = {
  $schema: 'http://powerbi.com/product/schema#basic',
  target: {
    table: 'items',
    column: 'id',
  },
  operator: 'In',
  values: [1, 2, 3],
  filterType: models.FilterType.Basic,
  requireSingleSelection: true,
  displaySettings: {
    /** Hiding filter pane */
    isLockedInViewMode: true,
    isHiddenInViewMode: true,
  },
};

reportConfig: IReportEmbedConfiguration = {
  type: 'report',
  id: cuantitativeReportID,
  embedUrl: undefined,
  tokenType: models.TokenType.Embed,
  filters: [this.basicFilter],
  accessToken: undefined,
  settings: undefined,
};
With this, I can avoid passing the filter information to the NodeJS API.
Yes, it will work fine with this solution. Please find the relevant code below:
Create a filter object:
const filter = {
  $schema: "http://powerbi.com/product/schema#basic",
  target: {
    table: "Geo",
    column: "Region"
  },
  operator: "In",
  values: ["West", "Central"]
};
Add the filter to the report's filters:
await report.updateFilters(models.FiltersOperations.Add, [filter]);
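Here, report is the embedded report instance on the client side. A minimal browser-side sketch using powerbi-client loaded globally (the container id and the reportId/embedUrl/embedToken variables are assumptions, with values returned by your Node API):
const models = window["powerbi-client"].models;

// Embed the report with the filter applied from the start.
const embedConfig = {
  type: "report",
  id: reportId,            // assumed: from getEmbedParamsForSingleReport
  embedUrl: embedUrl,      // assumed: from getEmbedParamsForSingleReport
  accessToken: embedToken, // assumed: from getEmbedParamsForSingleReport
  tokenType: models.TokenType.Embed,
  filters: [filter],       // the filter object defined above
};
const report = powerbi.embed(document.getElementById("reportContainer"), embedConfig);

// Filters can also be changed after the report has loaded:
report.on("loaded", function () {
  report.updateFilters(models.FiltersOperations.Add, [filter]);
});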
You can refer to the sample NodeJS application to get embed reports from Power BI.
Please find the reference here:
https://github.com/microsoft/PowerBI-Developer-Samples/tree/master/NodeJS
I'm using the Google Sheets API (v4) to create/update spreadsheets programmatically and have run into the following issue:
As per the documentation (https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/cells#CellFormat), I'm setting the number format to CURRENCY; this will correctly display the number as a numeric value with a ¥ sign at the front (Japanese locale). However, it does not seem to actually select the "Currency" option on the formats dropdown, and more importantly, does NOT reflect the specified format when downloading the spreadsheet (e.g. as an .xlsx file).
This is different from selecting the 'Currency' option manually (via the UI), in which case values are correctly displayed once downloaded.
Here's the relevant section of code:
import { google, sheets_v4 } from 'googleapis';

const sheetsApi = google.sheets({
  version: 'v4',
  auth: await this.getAuthClient(),
});

await sheetsApi.spreadsheets
  .batchUpdate({
    spreadsheetId: resource,
    requestBody: {
      requests: [
        {
          updateSpreadsheetProperties: {
            fields: 'locale',
            properties: {
              locale: 'ja',
            },
          },
        },
        ...,
        {
          repeatCell: {
            fields: 'userEnteredFormat.numberFormat',
            cell: {
              userEnteredFormat: {
                numberFormat: { type: 'CURRENCY' },
              },
            },
          },
        },
      ],
    },
  })
  .catch((error) => {
    console.log(error);
  });
I've also tried setting the pattern (tried a few different ones), but haven't been able to actually set the cell format, despite the value being displayed as such.
Probably a simple mistake, but I've been stuck on this for a while... any help would be greatly appreciated!
In that case, I thought that the pattern property might also be required to be set. So, how about modifying your repeatCell request as follows?
Modified request:
{
  "repeatCell": {
    "range": {,,,}, // Please set the range.
    "cell": {
      "userEnteredFormat": {
        "numberFormat": {
          "type": "CURRENCY",
          "pattern": "[$¥-411]#,##0.00" // <--- Added
        }
      }
    },
    "fields": "userEnteredFormat.numberFormat" // or "fields": "userEnteredFormat"
  }
}
Note:
In my environment, when the above modified request is used with the batchUpdate method, I could confirm that "Currency" was checked.
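As a complete illustration in the style of the question's code, here is a sketch that applies this format to B2:B11 of the first sheet (the sheetId and the row/column indexes are placeholder values to adapt; sheetsApi and resource are assumed from the question's code):
// Sketch: set a Japanese-yen currency format with an explicit pattern on B2:B11 of sheet 0.
await sheetsApi.spreadsheets.batchUpdate({
  spreadsheetId: resource,
  requestBody: {
    requests: [
      {
        repeatCell: {
          range: { sheetId: 0, startRowIndex: 1, endRowIndex: 11, startColumnIndex: 1, endColumnIndex: 2 },
          cell: {
            userEnteredFormat: {
              numberFormat: { type: 'CURRENCY', pattern: '[$¥-411]#,##0.00' },
            },
          },
          fields: 'userEnteredFormat.numberFormat',
        },
      },
    ],
  },
});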
References:
RepeatCellRequest, NumberFormat
Is it possible to export the contents of a Kendo data grid to multiple sheets of Excel?
Yes, it is possible.
Set up your grid to capture the excel export event.
MVC:
.Events(e => e.ExcelExport("onExcelExportGrid"))
or JavaScript:
"excelExport": onExcelExportGrid,
This JavaScript handler gives access to the workbook Kendo will use to build the Excel file. The workbook follows a very simple schema:
var workbook = new kendo.ooxml.Workbook({
  sheets: [
    {
      rows: [
        { cells: [{ value: "foo" }] }
      ]
    }
  ]
});
To prepend a new sheet, use something like
function excelPrependWorksheet(wb, worksheetName) {
  var sheet = {
    "name": worksheetName,
    "rows": [
      { "cells": [] }
    ]
  };
  wb["sheets"].unshift(sheet);
}

function onExcelExportGrid(e) {
  excelPrependWorksheet(e.workbook, "example");
  // fill out row and cell values here.
}
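For example, to actually put content into the prepended sheet, the handler could look like this (the sheet name and cell values here are made up for illustration):
function onExcelExportGrid(e) {
  excelPrependWorksheet(e.workbook, "Summary");
  // Fill the new first sheet with a few illustrative rows;
  // sheets[1] is the grid's own sheet after the prepend.
  e.workbook.sheets[0].rows = [
    { cells: [{ value: "Report", bold: true }] },
    { cells: [{ value: "Exported rows" }, { value: e.workbook.sheets[1].rows.length }] }
  ];
}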
Supported fields for cells, sheets, and workbook are documented at https://github.com/telerik/kendo-ui-core/blob/master/docs/api/javascript/ooxml/workbook.md
My goal is to create a service appointment record associated with multiple resources.
For this purpose I have followed this MSDN example.
The problem arises when I try to associate multiple resources with one particular service appointment: the CRM server stores only the last one (1 record). The following demonstrates my code:
//attendees = [... array of resource ids]
var serviceAppointment = {
  ScheduledStart: new Date("12/22/2014 4:53 PM"),
  ScheduledEnd: new Date("12/22/2014 5:53 PM"),
  Subject: "test service",
  ServiceId: {
    //// hardcoded id for simplicity
    Id: "6f795012-ca55-e411-aa38-00155d0a0948",
    LogicalName: "service"
  }
};

SDK.JQuery.createRecord(serviceAppointment, "ServiceAppointment",
  function (sa) {
    for (var i = 0; i < attendees.length; i++) {
      var activityParty = {
        PartyId: {
          Id: attendees[i],
          LogicalName: "systemuser"
        },
        ActivityId: {
          Id: sa.ActivityId,
          LogicalName: "serviceappointment"
        },
        ParticipationTypeMask: {
          //See http://msdn.microsoft.com/en-us/library/gg328549.aspx
          //10 is for resource
          Value: 10
        }
      };
      SDK.JQuery.createRecord(activityParty, "ActivityParty", function (ap) { debugger; }, errorHandler);
    }
  },
  errorHandler);
As far as I have debugged, the create record calls execute properly without any exception. I believe I'm missing a configuration flag somewhere in my code and CRM is treating this as a one-to-one association rather than one-to-many.
Any clues?
I could solve this issue by passing the array of parties in the service appointment record in the first place, rather than inserting them one by one afterwards. Here is the code that works:
var parties = [];
for (var i = 0; i < e.event.attendees.length; i++) {
  var activityParty = {
    PartyId: {
      Id: e.event.attendees[i],
      LogicalName: "systemuser"
    },
    ParticipationTypeMask: {
      //See http://msdn.microsoft.com/en-us/library/gg328549.aspx
      //10 is for resource
      Value: 10
    }
  };
  parties.push(activityParty);
}

var serviceAppointment = {
  ScheduledStart: e.event.start,
  ScheduledEnd: e.event.end,
  Subject: e.event.title,
  ServiceId: {
    Id: "6f795012-ca55-e411-aa38-00155d0a0948",
    LogicalName: "service"
  },
  serviceappointment_activity_parties: parties
};

SDK.JQuery.createRecord(serviceAppointment, "ServiceAppointment",
  function (sa) {
    debugger;
    e.event.ActivityId = serviceAppointmentActivityId = sa.ActivityId;
  },
  errorHandler);
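For reference, errorHandler in these snippets is assumed to be a simple callback along these lines (a sketch; the SDK.JQuery error callback receives an Error object):
// Minimal assumed error handler for the SDK.JQuery calls above.
function errorHandler(error) {
  alert(error.message);
}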
Hope it helps somebody out there.