var fs = require('fs');
var outfile = "primes.txt";

function getPrimes(max) {
    var primeSieve = [], i, j, primes = [];
    for (i = 2; i <= max; ++i) {
        if (!primeSieve[i]) {
            // i has not been marked - it is prime
            primes.push(i);
            for (j = i << 1; j <= max; j += i) {
                primeSieve[j] = true;
            }
        }
    }
    return primes;
}

fs.writeFileSync(outfile, getPrimes(1000).slice(0, 100) + ",");
console.log("Script: " + __filename + "\nWrote: " + getPrimes(1000).slice(0, 100) + "To: " + outfile);
I have the above piece of code, which I modified to produce an output (the main algorithm was provided by someone else). I am new to JavaScript and am unsure what the following line is actually doing and what the << operator means (I have been unable to find this out on the JavaScript website).
for (j = i << 1; j <= max; j += i)
I know that it is marking the relevant numbers in the main primeSieve array as true so that they do not populate the primes array; however, I don't know how it is doing this.
The << operator is the left shift operator. The left argument (after conversion to an integer value, if necessary) is shifted to the left by the number of bits specified by the right argument, right-filling with zeroes. Shifting left by one is the same as multiplying by 2.
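For a quick illustration (this snippet is mine, not part of the original code):

// << shifts the bits of the left operand; each shift by 1 doubles the value
var x = 5;            // binary 101
console.log(x << 1);  // 10 (binary 1010), same as x * 2
console.log(x << 3);  // 40, same as x * Math.pow(2, 3)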
The inner loop simply stores true in every element of primeSieve that is at an index that is a multiple of i. Thus, if primeSieve[j] is true, then j must be a multiple of some previous i (hence j cannot be prime). Conversely, if primeSieve[i] is not true, then it was not a multiple of any previous value of i; since that includes all integers from 2 to i-1, i must then be prime.
For collecting all primes up to a certain maximum, this method is far superior to techniques that independently test each integer for primality. However, it is far from the most efficient version of the sieve. Note, for instance, that an element of primeSieve might get set to true several times: primeSieve[6] is set when i==2 and again when i==3. Also, once i exceeds the square root of max, the inner loop is a waste, since all composite numbers up to max are guaranteed to have been marked by that point. See the Wikipedia article on the Sieve of Eratosthenes for more about how this all works and pointers to even more efficient methods.
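As an illustrative sketch (not a drop-in replacement; the name getPrimesFaster is mine), starting the inner loop at i * i applies the square-root observation automatically, since the marking loop never runs once i * i exceeds max:

// Sketch: the same sieve, but marking starts at i * i.
// For i > sqrt(max) the inner loop is skipped entirely,
// because every composite <= max is already marked.
function getPrimesFaster(max) {
    var primeSieve = [], i, j, primes = [];
    for (i = 2; i <= max; ++i) {
        if (!primeSieve[i]) {
            primes.push(i);
            for (j = i * i; j <= max; j += i) {
                primeSieve[j] = true;
            }
        }
    }
    return primes;
}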
P.S. That code looks suspiciously familiar. :-)
I'm building an app, and in one of my functions I need to generate random, unique 4-digit codes. Obviously there is a finite range from 0000 to 9999, but the entire list will be wiped each day, and I will never need more than the available number of codes in a day, so unique codes are always possible. Realistically I will probably only need a few hundred codes a day.
The way I've coded it for now is the simple brute-force way: generate a random 4-digit number, check whether it already exists in an array, and if it does, generate another number; if it doesn't, return the generated number.
Since it's 4 digits, the runtime isn't anything too crazy, and I'm mostly generating a few hundred codes a day, so there won't be some scenario where I've generated 9999 codes and keep randomly generating numbers to find the last remaining one.
It would also be fine to have letters in there as well instead of just numbers if it would make the problem easier.
Other than my brute force method, what would be a more efficient way of doing this?
Thank you!
Since you have a constrained number of values that will easily fit in memory, the simplest way I know of is to create a list of the possible values and select one randomly, then remove it from the list so it can't be selected again. This will never have a collision with a previously used number:
function initValues(numValues) {
    const values = new Array(numValues);
    // fill the array with each value
    for (let i = 0; i < values.length; i++) {
        values[i] = i;
    }
    return values;
}

function getValue(array) {
    if (!array.length) {
        throw new Error("array is empty, no more random values");
    }
    const i = Math.floor(Math.random() * array.length);
    const returnVal = array[i];
    array.splice(i, 1);
    return returnVal;
}

// sample code to use it
const rands = initValues(10000);

console.log(getValue(rands));
console.log(getValue(rands));
console.log(getValue(rands));
console.log(getValue(rands));
This works by doing the following:
Generate an array of all possible values.
When you need a value, select one from the array with a random index.
After selecting the value, remove it from the array.
Return the selected value.
Items are never repeated because they are removed from the array when used.
There are no collisions with used values because you're always just selecting a random value from the remaining unused values.
This relies on the fact that an array of integers is pretty well optimized in JavaScript, so doing a .splice() on a 10,000-element array is still quite fast (internally it can probably be done with memmove-style instructions).
FYI, this could be made more memory-efficient by using a typed array, since your numbers fit in 16-bit values (instead of the default 64-bit doubles). But you'd have to implement your own version of .splice() and keep track of the length yourself, since typed arrays don't have these capabilities built in.
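As a rough sketch of that variant (the names initValues16 and getValue16 are mine, and I've swapped the shift-down splice for an order-destroying swap, which is fine here because the pool's order doesn't matter):

// Hypothetical Uint16Array version: "remove" the picked slot by
// overwriting it with the last live element and shrinking the count.
function initValues16(numValues) {
    const pool = { values: new Uint16Array(numValues), count: numValues };
    for (let i = 0; i < numValues; i++) {
        pool.values[i] = i;
    }
    return pool;
}

function getValue16(pool) {
    if (!pool.count) {
        throw new Error("no more random values");
    }
    const i = Math.floor(Math.random() * pool.count);
    const returnVal = pool.values[i];
    pool.values[i] = pool.values[pool.count - 1]; // move last live item into the gap
    pool.count--;
    return returnVal;
}

The swap makes each pick O(1), which as a bonus avoids the O(n) shifting a real splice does.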
For even larger problems like this where memory usage becomes a problem, I've used a BitArray to keep track of previous usage of values.
Here's a class implementation of the same functionality:
class Randoms {
    constructor(numValues) {
        this.values = new Array(numValues);
        for (let i = 0; i < this.values.length; i++) {
            this.values[i] = i;
        }
    }

    getRandomValue() {
        if (!this.values.length) {
            throw new Error("no more random values");
        }
        const i = Math.floor(Math.random() * this.values.length);
        const returnVal = this.values[i];
        this.values.splice(i, 1);
        return returnVal;
    }
}

const rands = new Randoms(10000);

console.log(rands.getRandomValue());
console.log(rands.getRandomValue());
console.log(rands.getRandomValue());
console.log(rands.getRandomValue());
Knuth's multiplicative method looks to work pretty well: it maps the numbers 0 to 9999 to a random-looking permutation of 0 to 9999, with no overlap:
const hash = i => i * 2654435761 % 10000;

const s = new Set();
for (let i = 0; i < 10000; i++) {
    const n = hash(i);
    if (s.has(n)) { console.log(i, n); break; }
    s.add(n);
}
To implement it, simply keep track of an index that gets incremented each time a new one is generated:
const hash = i => i * 2654435761 % 10000;

let i = 1;
console.log(
    hash(i++),
    hash(i++),
    hash(i++),
    hash(i++),
    hash(i++),
);
These results aren't actually random, but they probably do the job well enough for most purposes.
Disclaimer:
This is copy-paste from my answer to another question here. The code was in turn ported from yet another question here.
Utilities:
function isPrime(n) {
    if (n <= 1) return false;
    if (n <= 3) return true;
    if (n % 2 == 0 || n % 3 == 0) return false;
    for (let i = 5; i * i <= n; i = i + 6) {
        if (n % i == 0 || n % (i + 2) == 0) return false;
    }
    return true;
}

function findNextPrime(n) {
    if (n <= 1) return 2;
    let prime = n;
    while (true) {
        prime++;
        if (isPrime(prime)) return prime;
    }
}

function getIndexGeneratorParams(spaceSize) {
    const N = spaceSize;
    const Q = findNextPrime(Math.floor(2 * N / (1 + Math.sqrt(5))));
    const firstIndex = Math.floor(Math.random() * spaceSize);
    return [firstIndex, N, Q];
}

function getNextIndex(prevIndex, N, Q) {
    return (prevIndex + Q) % N;
}
Usage
// Each day you bootstrap to get a tuple of these parameters and persist them throughout the day.
const [firstIndex, N, Q] = getIndexGeneratorParams(10000);

// Need to keep track of the previous index generated;
// it's the seed for generating the next one.
let prevIndex = firstIndex;

// Calling this function gives you the unique code.
function getHashCode() {
    prevIndex = getNextIndex(prevIndex, N, Q);
    return prevIndex.toString().padStart(4, "0");
}

console.log(getHashCode());
Explanation
For simplicity, let's say you want to generate non-repeating numbers from 0 to 35 in random order. We get pseudo-randomness by polling a "full cycle iterator"†. The idea is simple:
lay the indexes 0..35 out in a circle, and denote the upper bound as N=36
decide a step size, denoted Q (Q=23 in this case), given by this formula‡
Q = findNextPrime(Math.floor(2 * N / (1 + Math.sqrt(5))))
randomly decide a starting point, e.g. number 5
start generating seemingly random nextIndex values from prevIndex, by
nextIndex = (prevIndex + Q) % N
So if we put 5 in we get (5 + 23) % 36 == 28. Put 28 in we get (28 + 23) % 36 == 15.
This process will go through every number on the circle (jumping back and forth among its points), picking each number exactly once without repeating. When we get back to our starting point 5, we know we've reached the end.
†: I'm not sure about this term; I'm just quoting from this answer
‡: This formula just gives a nice step size that makes things look more "random"; the only hard requirement is that Q be coprime to N
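A quick illustrative check of the full-cycle property for the N=36, Q=23 example:

// Since gcd(23, 36) === 1, stepping by 23 visits all 36 indexes
// exactly once before returning to the starting point.
const N = 36, Q = 23;
const seen = new Set();
let idx = 5; // the arbitrary starting point from the example
do {
    seen.add(idx);
    idx = (idx + Q) % N;
} while (idx !== 5);
console.log(seen.size); // 36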
This problem is so small that I think a simple solution is best: build an ordered array of the 10k possible values and permute (shuffle) it at the start of each day. Give the k'th value to the k'th request that day.
This avoids the possible problem in your solution of repeated collisions.
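A minimal sketch of that idea, assuming a Fisher-Yates shuffle (the names here are mine):

// Build [0..n-1] once per day, then shuffle it in place (Fisher-Yates).
function makeDailyCodes(n = 10000) {
    const codes = Array.from({ length: n }, (_, i) => i);
    for (let i = codes.length - 1; i > 0; i--) {
        const j = Math.floor(Math.random() * (i + 1));
        [codes[i], codes[j]] = [codes[j], codes[i]];
    }
    return codes;
}

// The k'th request simply gets the k'th entry, zero-padded to 4 digits.
const codes = makeDailyCodes();
let k = 0;
console.log(String(codes[k++]).padStart(4, "0"));

Shuffling once up front is O(n), and each request afterwards is O(1).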
I wrote two prime finder functions and the sieve only performs about 10% better. I'm using two optimizations for the simple version.
Don't check even numbers
Only check up to the square root, i.e. j * j <= i (equivalent)
and one optimization for the sieve version
Only check up to the square root, i.e. i * i <= n (equivalent)
What optimizations can I add to the sieve?
My sieve is pretty slow. I don't want to do a bitwise implementation yet; I want to understand whether this implementation offers any benefits.
Or if I missed an implementation point.
The inner for loop in the pseudocode here looks interesting / odd
https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes
I don't know how to interpret it. (update: the OP seems to indicate in the comments that it was an issue with incorrect formatting after copy-pasting the pseudocode from Wikipedia, and with the corrected formatting it is clear now)
Here it is:
algorithm Sieve of Eratosthenes is
    input: an integer n > 1.
    output: all prime numbers from 2 through n.

    let A be an array of Boolean values, indexed by integers 2 to n,
    initially all set to true.

    for i = 2, 3, 4, ..., not exceeding √n do
        if A[i] is true
            for j = i², i²+i, i²+2i, i²+3i, ..., not exceeding n do
                A[j] := false

    return all i such that A[i] is true.
// prime-2
// 2 optimizations - odds and square root
function prime2(n) {
    const primes = [2];
    not_prime: for (let i = 3; i < n; i += 2) {
        for (let j = 2; j * j <= i; j++) {
            if (i % j === 0) {
                continue not_prime;
            }
        }
        primes.push(i);
    }
    return primes;
}

// prime-3
// sieve implementation
function prime3(n) {
    const primes = [];
    const sieve = new Array(n).fill(true);
    for (let i = 2; i * i <= n; i += 1) {
        if (sieve[i]) {
            for (let j = i + i; j < n; j += i) {
                sieve[j] = false;
            }
        }
    }
    makePrimes(sieve, primes, n);
    return primes;
}

function makePrimes(sieve, primes, n) {
    for (let i = 2; i < n; i++) {
        if (sieve[i]) {
            primes.push(i);
        }
    }
}
What you see is an expression of the differences in theoretical run time complexities, i.e. the true algorithmic differences between the two algorithms.
Optimal trial division sieve's complexity is O(n^1.5/(log n)^2)(*), whereas the sieve of Eratosthenes' complexity is O(n log log n).
According to the empirical run time figures posted by Scott Sauyet in the comments,
1e6 279ms 36ms
1e7 6946ms 291ms
-------------------------
n^ 1.40 0.91
the empirical orders of growth are roughly ~n^1.4 and ~n in the measured range, which is a good fit.
So your genuine sieve does perform well, and the trial division sieve performs as expected. The algorithmic nature of the code will always beat the presence or absence of secondary optimizations, if we increase the problem size enough.
And comparing performance at just one problem size is never enough. Even if you see only a 10% difference over the "simpler" one now, test at bigger sizes and the difference will grow.
If you want some pointers about what can be further improved in your code, do note that you start the inner loop from i+i instead of from i*i, for starters.
Another common optimization is to special-case 2: start from 3, increment the candidates by 2, and use an inner-loop increment of 2*i instead of just i, for an instant 2x speedup. This is the simplest form of wheel factorization, which can be applied further, though with diminishing returns for each additional prime; using a 2-3-5-7 wheel is common and should give about another 2x speedup, if memory serves.
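An illustrative sketch of those two changes combined (the name prime3odds is mine; this is your sieve with the i*i start and the odds-only wheel, not a tuned implementation):

// Special-case 2, test only odd candidates, start marking at i * i,
// and step by 2 * i so only odd multiples are touched.
function prime3odds(n) {
    const primes = n > 2 ? [2] : [];
    const sieve = new Array(n).fill(true);
    for (let i = 3; i * i <= n; i += 2) {
        if (sieve[i]) {
            for (let j = i * i; j < n; j += 2 * i) {
                sieve[j] = false;
            }
        }
    }
    for (let i = 3; i < n; i += 2) {
        if (sieve[i]) primes.push(i);
    }
    return primes;
}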
Last but not least, make it segmented.
(*) that's π(n)* π(√n) coming from primes, and no more than that, from the composites.
A friend of mine takes a sequence of numbers from 1 to n (where n > 0)
Within that sequence, he chooses two numbers, a and b
He says that the product of a and b should be equal to the sum of all numbers in the sequence, excluding a and b
Given a number n, could you tell me the numbers he excluded from the sequence?
I have found the solution to this kata from Code Wars, but it times out (after 12 seconds) in the editor when I run it; any ideas as to how I should further optimize the nested for loop, or remove it?
function removeNb(n) {
    var nArray = [];
    var sum = 0;
    var answersArray = [];
    for (let i = 1; i <= n; i++) {
        nArray.push(n - (n - i));
        sum += i;
    }
    var length = nArray.length;
    for (let i = Math.round(n / 2); i < length; i++) {
        for (let y = Math.round(n / 2); y < length; y++) {
            if (i != y) {
                if (i * y === sum - i - y) {
                    answersArray.push([i, y]);
                    break;
                }
            }
        }
    }
    return answersArray;
}
console.log(removeNb(102));
I think there is no reason to calculate the sum after you fill the array; you can do that while filling it.
function removeNb(n) {
    let nArray = [];
    let sum = 0;
    for (let i = 1; i <= n; i++) {
        nArray.push(i);
        sum += i;
    }
}
And since there can be only two numbers a and b satisfying the formula a * b = sum - a - b, there is only one possible value for each of them. So there's no need to continue the loop once you find them.
if (i * y === sum - i - y) {
    answersArray.push([i, y]);
    break;
}
I recommend looking at the problem in another way.
You are trying to find two numbers a and b using this formula a * b = sum - a - b.
Why not reduce the formula like this:
a * b + a = sum - b
a ( b + 1 ) = sum - b
a = (sum - b) / ( b + 1 )
Then you only need one for loop, which produces the value of b; check whether (sum - b) is divisible by (b + 1) and whether the division produces a number less than n.
for (let i = 1; i <= n; i++) {
    let eq1 = sum - i; // here i plays the role of b
    let eq2 = i + 1;
    if (eq1 % eq2 === 0) {
        let a = eq1 / eq2;
        if (a < n && a != i) {
            return [[a, i], [i, a]];
        }
    }
}
You can solve this in linear time with two pointers method (page 77 in the book).
In order to gain intuition towards a solution, let's start thinking about this part of your code:
for (let i = Math.round(n / 2); i < length; i++) {
    for (let y = Math.round(n / 2); y < length; y++) {
        ...
You already figured out this is the part of your code that is slow. You are trying every combination of i and y, but what if you didn't have to try every single combination?
Let's take a small example to illustrate why you don't have to try every combination.
Suppose n == 10 so we have 1 2 3 4 5 6 7 8 9 10 where sum = 55.
Suppose the first combination we tried was 1*10.
Does it make sense to try 1*9 next? Of course not: since 1*10 < 55-10-1, we know we have to increase our product, not decrease it.
So let's try 2*10. Well, 20 < 55-10-2 so we still have to increase.
3*10==30 < 55-3-10==42
4*10==40 < 55-4-10==41
But then 5*10==50 > 55-5-10==40. Now we know we have to decrease our product. We could either decrease 5 or we could decrease 10, but we already know that there is no solution if we decrease 5 (since we tried that in the previous step). So the only choice is to decrease 10.
5*9==45 > 55-5-9==41. Same thing again: we have to decrease 9.
5*8==40 < 55-5-8==42. And now we have to increase again...
You can think about the above example as having 2 pointers which are initialized to the beginning and end of the sequence. At every step we either
move the left pointer towards right
or move the right pointer towards left
In the beginning the difference between pointers is n-1. At every step the difference between pointers decreases by one. We can stop when the pointers cross each other (and say that no solution can be obtained if one was not found so far). So clearly we can not do more than n computations before arriving at a solution. This is what it means to say that the solution is linear with respect to n; no matter how large n grows, we never do more than n computations. Contrast this to your original solution, where we actually end up doing n^2 computations as n grows large.
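Here is a sketch of that two-pointer idea applied to the kata (the function name is mine):

// Two pointers: a starts low, b starts high; a*b + a + b is strictly
// increasing in both, so each comparison safely discards one candidate.
function removeNbTwoPointers(n) {
    const sum = n * (n + 1) / 2;
    const results = [];
    let a = 1, b = n;
    while (a < b) {
        const total = a * b + a + b; // a*b === sum - a - b  <=>  total === sum
        if (total === sum) {
            results.push([a, b], [b, a]);
            a++; b--; // no other solution can share this a or this b
        } else if (total < sum) {
            a++;      // product too small: move the left pointer up
        } else {
            b--;      // product too big: move the right pointer down
        }
    }
    return results;
}

console.log(removeNbTwoPointers(26)); // [[15, 21], [21, 15]]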
Hassan is correct, here is a full solution:
function removeNb(n) {
    var a = 1;
    var d = 1;

    // Calculate the sum of the numbers 1-n without anything removed
    var S = 0.5 * n * (2 * a + (d * (n - 1)));

    // For each possible value of b, calculate a if it exists.
    var results = [];
    for (let numB = a; numB <= n; numB++) {
        let eq1 = S - numB;
        let eq2 = numB + 1;
        if (eq1 % eq2 === 0) {
            let numA = eq1 / eq2;
            if (numA < n && numA != numB) {
                results.push([numA, numB]);
                results.push([numB, numA]);
            }
        }
    }
    return results;
}
In case it's of interest, CY Aries pointed this out:
ab + a + b = n(n + 1)/2
add 1 to both sides
ab + a + b + 1 = (n^2 + n + 2) / 2
(a + 1)(b + 1) = (n^2 + n + 2) / 2
so we're looking for factors of (n^2 + n + 2) / 2 and have some indication about the least size of the factor. This doesn't necessarily imply a great improvement in complexity for the actual search but still it's kind of cool.
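A quick numeric check of that identity (using n = 26, where the excluded pair happens to be a = 15, b = 21):

// (a + 1)(b + 1) should equal (n^2 + n + 2) / 2
const n = 26, a = 15, b = 21;
console.log((a + 1) * (b + 1));   // 352
console.log((n * n + n + 2) / 2); // 352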
This is part comment, part answer.
In engineering terms, the original function posted uses "brute force" to solve the problem, iterating over every possible combination (or more than needed). The number of iterations when n is large, if you tried every combination, would be
n * (n-1) = a bazillion for large n
Less is More
So let's look at things that can be optimized. First, some minor things: I'm a little confused about the first for loop and nArray:
// OP's code
for(let i = 1; i <= n; i++) {
nArray.push(n - (n - i));
sum += i;
}
??? You don't really use nArray for anything - its length is just n... am I so sleep-deprived that I'm missing something? And while you can sum a consecutive sequence of integers 1-n using a for loop, there is a direct and easy way that avoids a loop:
sum = (n + 1) * n * 0.5;
THE LOOPS
// OP's loops, not optimized
for (let i = Math.round(n / 2); i < length; i++) {
    for (let y = Math.round(n / 2); y < length; y++) {
        if (i != y) {
            if (i * y === sum - i - y) {
Optimization Considerations:
I see you're on the right track in a way, cutting the starting i, y values to about half of n, since both factors have to be roughly that large (their product is about n²/2 and neither factor can exceed n). But you're iterating both of them in the same direction: UP. Also, the lower number can apparently dip slightly below half of n (perhaps because the sequence starts at 1; I haven't confirmed that, but it seems to be the case).
Plus, we want to avoid dividing every time we start the loops (i.e. set the variable once, and then modify it). And finally, with the way we're going to construct the loops, i and y can never be equal to each other, so that conditional can vanish.
But the more important thing is the direction of traversing the loops. The smaller factor low is probably going to be close to the lowest loop value (about half of n), and the larger factor hi is probably going to be near the value of n. If we had some solid math theory that said something like "hi will never be less than 0.75n", then we could make a couple of mods to take advantage of that knowledge.
The way the loops are shown below, they break and iterate before the hi and low loops meet.
Moreover, it doesn't matter which loop picks the lower or higher number, so we can use this to shorten the inner loop as number pairs are tested, making the loop smaller each time. We don't want to waste time checking the same pair of numbers more than once! The lower factor's loop will start a little below half of n and go up, and the higher factor's loop will start at n and go down.
// Code Fragment, more optimized:
let nHi = n;
let low = Math.trunc(n * 0.49);
let sum = (n + 1) * n * 0.5;

// While loop for the outside (incrementing) low factor
while (low < nHi) {
    // FOR loop for the inside (decrementing) hi factor
    for (let hi = nHi; hi > low; hi--) {
        // If we're higher than the sum, continue, decrementing hi.
        if (hi * low + hi + low > sum) {
            continue;
        }
        // If we're equal, then we're DONE and we write to the array.
        else if (hi * low + hi + low === sum) {
            answersArray.push([hi, low]);
            low = nHi; // This ends the search once one pair is found;
            break;     // to find ALL pairs for large numbers, replace low = nHi; with low++;
        }
        // And if not, we increment the low counter and restart the hi loop from the top.
        else {
            low++;
            break;
        }
    } // close for
} // close while
Tutorial:
So we set the few variables. Note that low is set slightly less than half of n, as the lower factor looks like it can dip a few points below n/2. Also, we don't round, we truncate, which is essentially "always rounding down" and is slightly better for performance (though it doesn't matter in this instance with just the single assignment).
The while loop starts at the lowest value and increments, potentially all the way up to n-1. The hi FOR loop starts at n (copied to nHi) and then decrements until the factors are found OR it reaches low + 1.
The conditionals:
First IF: If we're above the sum, we continue, moving on to a lower value of the hi factor.
ELSE IF: If we are EQUAL, then we're done, and break for lunch. We set low = nHi so that when we break out of the FOR loop, we will also exit the WHILE loop.
ELSE: If we get here it's because we're less than the sum, so we need to increment the while loop and reset the hi FOR loop to start again from n (nHi).
So I'm running into an odd error where I'm summing all Fibonacci numbers that are odd and LESS than a given number.
The odd thing is that this works with low values, but when I get to upper values past 10 or so... it'll crash codepen.io.
Here is what I have so far:
function f(n) {
    if (n <= 1)
        return n;
    return f(n - 1) + f(n - 2);
}

function sumFibs(num) {
    var counter = 0;
    var arr = [];
    // Get all Fibonacci numbers up to num
    for (let i = 1; i <= num; i++) {
        arr.push(f(i));
    }
    for (let j = 0; j < arr.length; j++) {
        if (arr[j] % 2 != 0 && arr[j] <= num) {
            counter += arr[j];
        }
    }
    console.log(counter);
    return counter;
}

sumFibs(10);
Basically I calculate Fibonacci numbers up to num, and then I go through each odd one that's less than or equal to num and add those up.
I'm getting correct values (i.e. for 10 I get 10, for 4 I get 5, etc.),
but if I put in something like 1000 it seems to just crash, and I can't seem to figure out any reason why.
The recursive f() function is a logical way to express a Fibonacci number calculation, but it isn't very efficient compared to an iterative approach, especially because you are calling it repeatedly from inside a loop. I think this is bringing your browser to a halt. Within the loop each time you call f() it is calculating the specified Fibonacci number from scratch by recursively calling itself. So, say, to get f(10), it calls itself twice with f(9) + f(8) (and then they in turn call f(8)+f(7) and f(7)+f(6), etc., so even that is pretty inefficient), but in fact you already know what f(9) and f(8) are because you've stored those values in your array on previous loop iterations.
If you change your loop to calculate each subsequent number directly rather than calling another function you get much faster code:
var arr = [1, 1]; // start with the known first two numbers

// Get all Fibonacci numbers up to num
for (let i = 2; i < num; i++) { // start the loop at index 2 for the third number
    arr[i] = arr[i - 2] + arr[i - 1];
}
With that change in place, your sumFibs() function can give you results even for sumFibs(1000000) in a matter of milliseconds:
function sumFibs(num) {
    var counter = 0;
    var arr = [1, 1];
    // Get all Fibonacci numbers up to num
    for (let i = 2; i < num; i++) {
        arr[i] = arr[i - 2] + arr[i - 1];
    }
    for (let j = 0; j < arr.length; j++) {
        if (arr[j] % 2 != 0) {
            counter += arr[j];
        }
    }
    return counter;
}

console.log('10: ' + sumFibs(10));
console.log('100: ' + sumFibs(100));
console.log('1000: ' + sumFibs(1000));
console.log('10000: ' + sumFibs(10000));

console.time('High fib');
console.log('1000000: ' + sumFibs(1000000));
console.timeEnd('High fib');
Note that you also had a logic error in your second loop, the one that adds up the odd numbers: the && arr[j] <=num part needed to be removed. The values in arr are the actual Fibonacci numbers, but num is the sequence number, so it doesn't make sense to be comparing them. You just want every odd number in the whole array.
However, the return value from your function is still going to be incorrect if num is too large. That's because by the time you get to the 80-somethingth Fibonacci number, it is larger than JavaScript can handle without losing precision, i.e., larger than Number.MAX_SAFE_INTEGER, 9,007,199,254,740,991 (which is 2^53 - 1). Numbers above that start getting rounded, so your tests for odd numbers aren't reliable and the total sum doesn't include all of the numbers it should; and once the Fibonacci values themselves grow past the largest representable double, JS considers them (and your result) to be Infinity.
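To see the precision issue concretely:

// Above Number.MAX_SAFE_INTEGER, integers get rounded to the nearest
// representable double, so parity tests like arr[j] % 2 become unreliable.
console.log(Number.MAX_SAFE_INTEGER); // 9007199254740991
console.log(9007199254740993 % 2);    // 0 - the literal rounds down to ...992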
I have the following function:
function myFunction(array, sum) {
    for (var i = 0; i < array.length; i++) {
        var firstValue = array[i];
        for (var x = i + 1; x < array.length; x++) {
            var secondValue = array[x];
            if ((firstValue + secondValue) == sum) {
                return i + ": " + x;
            }
        }
    }
    return "No two array values match the sum";
}
The above takes two parameters: first an array, and second a sum. It finds the first two numbers in the array that add up to the second parameter, and returns the array indexes of those two numbers. Right now, the function solves the problem in n^2 time. Is there a way to solve the same problem in kn time?
I wrote the function in JavaScript, but this can apply to all modern languages.
This is the 2-sum problem. Generally there are two efficient algorithms for it.
1) Hash method: build a hash table of the values, and for each value v, just look up sum - v.
function myFunction(array, sum) {
    const h = new Set(); // hash table of the values seen so far
    for (var i = 0; i < array.length; i++) {
        if (h.has(sum - array[i])) return "found";
        h.add(array[i]);
    }
    return "No two array values match the sum";
}
The time and space complexity are both O(N).
2) The other method is to sort the array first and then scan with two indexes.
function myFunction(array, sum) {
    array.sort((a, b) => a - b); // numeric sort
    var i = 0, j = array.length - 1;
    while (i < j) {
        if (array[i] + array[j] == sum) return "Found";
        else if (array[i] + array[j] > sum) --j;
        else ++i;
    }
    return "No two array values match the sum";
}
This costs O(N log N) time and O(1) space.
To return the two indexes rather than just the values, store each value's original index in the hash table, or keep the indexes alongside the values when you sort the array.
For this problem, there are two approaches, which are all based on the property of a sorted array.
First step, sort the array, which require O(nlogn) time complexity.
Second step:
Approach A: iterate through each value array[i] in the sorted array and use binary search to find the value sum - array[i] in the array. Time complexity for this step is O(n log n) (the search range can be limited to the elements from i onward, which helps in practice).
Approach B: maintain two pointers; start points at the beginning of the array (the smallest value) and end points at the end of the array (the largest value). Compare the value sum - array[start] with array[end]: if array[end] > sum - array[start], decrease end; if array[end] < sum - array[start], increase start; if they are equal, we have found the pair. If the pointers cross without a match, return null. Time complexity for this step is O(n).
In total, both approaches give a O(nlogn) time complexity.
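A sketch of Approach A (illustrative; the function name is mine):

// Sort a copy, then binary-search for sum - a[i] in the part after i.
function twoSumBinarySearch(array, sum) {
    const a = [...array].sort((x, y) => x - y);
    for (let i = 0; i < a.length; i++) {
        const target = sum - a[i];
        let lo = i + 1, hi = a.length - 1;
        while (lo <= hi) {
            const mid = (lo + hi) >> 1;
            if (a[mid] === target) return [a[i], a[mid]];
            if (a[mid] < target) lo = mid + 1;
            else hi = mid - 1;
        }
    }
    return null; // no pair sums to the target
}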
I'm not sure what you mean by k, but if k stands for the sum, and the array contains only non-negative integers, then you can do it in O(k*n) time. This can be useful when k << n, and it has the benefits of O(1) additional memory and no modification of the original array.
The idea is to scan the array k+1 times, using i = 0..k. On each scan, the values i and k-i are searched. If both are found, return their positions. If all k+1 scans complete with no match, then the sum can't be achieved.
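A sketch of that scanning idea (illustrative; assumes non-negative integers and k = sum):

// k+1 scans: on scan i, look for one element equal to i and
// another equal to k - i; O(k*n) time, O(1) extra space.
function twoSumScan(array, k) {
    for (let i = 0; i <= k; i++) {
        let posI = -1, posJ = -1;
        for (let x = 0; x < array.length; x++) {
            if (array[x] === i && posI === -1) posI = x;
            else if (array[x] === k - i && posJ === -1) posJ = x;
        }
        if (posI !== -1 && posJ !== -1) return [posI, posJ];
    }
    return null; // the sum can't be achieved
}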