Why is pop faster than shift?
Solution 1:
To remove the first item without re-addressing the underlying storage (which would invalidate existing references into it), shift() has to move the entire contents of the array down one slot; pop() can simply subtract 1 from the array's length.
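For illustration (the values here are just an example), this is what each call leaves behind:

var a = [10, 20, 30];
a.pop();          // returns 30
console.log(a);   // [10, 20] - the surviving elements keep their indices

var b = [10, 20, 30];
b.shift();        // returns 10
console.log(b);   // [20, 30] - what was b[1] is now b[0], what was b[2] is now b[1]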
Solution 2:
shift() has to re-index the whole array, while pop() doesn't.

pop() simply removes the last element of the array. The other elements do not move; only the .length has to be updated.

shift() removes the first element of the array. This requires re-indexing every remaining element, so that [1] becomes [0], and so on.
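A rough sketch of what each operation conceptually has to do (real engines work on their internal element storage, not through a JavaScript loop like this):

function naiveShift(arr) {
    if (arr.length === 0) return undefined;
    var first = arr[0];
    // Re-index: copy every surviving element down one slot,
    // so [1] becomes [0], [2] becomes [1], and so on.
    for (var i = 1; i < arr.length; i++) {
        arr[i - 1] = arr[i];
    }
    arr.length -= 1;
    return first;
}

function naivePop(arr) {
    if (arr.length === 0) return undefined;
    // No elements move; only the length changes.
    var last = arr[arr.length - 1];
    arr.length -= 1;
    return last;
}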
Solution 3:
I was doing some tests on this with Node (which uses Chrome's V8 engine) and noticed that for arrays up to around 120,000 elements the performance of shift() is pretty close to pop(). Once you get bigger than that, shift() seems to slow down dramatically.
var sum;
var tests = [125000, 130000];
console.log(JSON.stringify(process.versions));
tests.forEach(function(count) {
    console.log('Testing arrays of size ' + count);
    var s1 = Date.now();
    var sArray = new Array(count);
    var pArray = new Array(count);
    for (var i = 0; i < count; i++) {
        var num = Math.floor(Math.random() * 6) + 1;
        sArray[i] = num;
        pArray[i] = num;
    }
    console.log(' -> ' + (Date.now() - s1) + 'ms: built arrays with ' + count + ' random elements');

    s1 = Date.now();
    sum = 0;
    while (pArray.length) {
        sum += pArray.pop();
    }
    console.log(' -> ' + (Date.now() - s1) + 'ms: sum with pop() ' + count + ' elements, sum = ' + sum);

    s1 = Date.now();
    sum = 0;
    while (sArray.length) {
        sum += sArray.shift();
    }
    console.log(' -> ' + (Date.now() - s1) + 'ms: sum with shift() ' + count + ' elements, sum = ' + sum);
});
Output:
{"http_parser":"1.0","node":"0.10.22","v8":"3.14.5.9","ares":"1.9.0-DEV","uv":"0.10.19","zlib":"1.2.3","modules":"11","openssl":"1.0.1e"}
Testing arrays of size 125000
-> 14ms: built arrays with 125000 random elements
-> 2ms: sum with pop() 125000 elements, sum = 436673
-> 6ms: sum with shift() 125000 elements, sum = 436673
Testing arrays of size 130000
-> 50ms: built arrays with 130000 random elements
-> 1ms: sum with pop() 130000 elements, sum = 455971
-> 54372ms: sum with shift() 130000 elements, sum = 455971
Solution 4:
Because shift() re-indexes the array, it is very slow on large arrays.
var array = [];
for (var i = 0; i < 1000000; i++) {
    array.push(i);
}

var start = new Date().getTime();
for (var i = 0; i < 100000; i++) {
    array.shift();
}
var duration = new Date().getTime() - start; // duration is very large, greater than 3 minutes
But the duration is just 8 ms when using the linked-queue package:
var LQueue = require('linked-queue');
var queue = new LQueue();
for (var i = 0; i < 1000000; i++) {
    queue.enqueue(i);
}
console.log("Queue length:" + queue.length);

var start = new Date().getTime();
queue.dequeueAll(function(data) {
});
var end = new Date().getTime();
console.log("Time:" + (end - start)); // 8 ms
console.log("Queue length:" + queue.length);
Solution 5:
The difference can be negligible: unoptimized engines may run shift() much slower than pop(), but optimized ones won't. You can also optimize it yourself, like this:
let WrapArray = arr => {
    // Assumes nothing else keeps a direct reference to `arr`.
    let numlike = k => !isNaN(k);
    let num = k => Number(k);
    let shift_q = 0; // how many elements have been shifted off so far

    return new Proxy(arr, {
        get(target, k) {
            switch (k) {
                case 'shift':
                    // O(1): read the current front element, drop it, advance the offset.
                    return () => {
                        let r = target[shift_q];
                        delete target[shift_q++];
                        return r;
                    };
                case 'length':
                    return target.length - shift_q;
                default:
                    // Remap numeric indices by the offset; pass other keys through.
                    // (A real implementation would also guard against index overflow here.)
                    return target[numlike(k) ? num(k) + shift_q : k];
            }
        },
        set(target, k, v) {
            if (k === 'length') target.length = v + shift_q;
            else target[numlike(k) ? num(k) + shift_q : k] = v;
            return true;
        },
        has(target, k) {
            return (numlike(k) ? num(k) + shift_q : k) in target;
        },
        deleteProperty(target, k) {
            return delete target[numlike(k) ? num(k) + shift_q : k];
        },
    });
};

let wrapped = WrapArray(['a', 'b', 'c']);
wrapped.shift();
console.log(wrapped.length /* 2 */, wrapped[0] /* 'b' */, wrapped[1] /* 'c' */, wrapped[2] /* undefined */);