Efficiently convert an uneven list of lists to a minimal containing array padded with NaN

consider the list of lists l

l = [[1, 2, 3], [1, 2]]

if I convert this to a np.array I'll get a one dimensional object array with [1, 2, 3] in the first position and [1, 2] in the second position.

print(np.array(l))

[[1, 2, 3] [1, 2]]

I want this instead

print(np.array([[1, 2, 3], [1, 2, np.nan]]))

[[  1.   2.   3.]
 [  1.   2.  nan]]

I can do this with a loop, but we all know how unpopular loops are

def box_pir(l):
    """Pad an uneven list of lists into a 2D float array filled with NaN.

    Each sub-list becomes one row, right-padded with NaN up to the length
    of the longest sub-list.  An empty input yields a (0, 0) array instead
    of raising ValueError.
    """
    # list(map(len, l)) instead of the redundant identity comprehension
    # [i for i in map(len, l)] (ruff C416).
    lengths = list(map(len, l))
    # default=0 keeps max() from raising ValueError when l is empty.
    shape = (len(l), max(lengths, default=0))
    a = np.full(shape, np.nan)
    for i, r in enumerate(l):
        a[i, :lengths[i]] = r
    return a

print(box_pir(l))

[[  1.   2.   3.]
 [  1.   2.  nan]]

how do I do this in a fast, vectorized way?


timing

(timing benchmark plot: runtime of each approach vs. input size — image not reproduced here)

(second timing benchmark plot — image not reproduced here)

setup functions

%%cython
import numpy as np

def box_pir_cython(l):
    """Pad the ragged list-of-lists `l` into a 2D float array, filling with NaN."""
    n_rows = len(l)
    row_lens = [len(row) for row in l]
    padded = np.full((n_rows, max(row_lens)), np.nan)
    for row_idx in range(n_rows):
        padded[row_idx, :row_lens[row_idx]] = l[row_idx]
    return padded

def box_divikar(v):
    """NaN-pad `v` using a broadcast boolean mask of the valid cells."""
    row_lengths = np.array([len(row) for row in v])
    width = row_lengths.max()
    # Cell (i, j) is valid when column j is inside row i's length.
    valid = np.arange(width) < row_lengths[:, None]
    result = np.full(valid.shape, np.nan)
    result[valid] = np.concatenate(v)
    return result

def box_hpaulj(LoL):
    """NaN-pad via itertools.zip_longest (which pads columns), then transpose back."""
    columns = zip_longest(*LoL, fillvalue=np.nan)
    return np.array(list(columns)).T

def box_simon(LoL):
    """Pad each row with NaN on the right in pure Python, then convert once."""
    width = max(map(len, LoL))
    padded_rows = [row + [np.nan] * (width - len(row)) for row in LoL]
    return np.array(padded_rows)

def box_dawg(LoL):
    """Slice-assign each row into a NaN-prefilled target array."""
    n_rows = len(LoL)
    n_cols = max(len(row) for row in LoL)
    target = np.full((n_rows, n_cols), np.nan)
    for i, row in enumerate(LoL):
        target[i, :len(row)] = row
    return target

def box_pir(l):
    """Loop-based NaN padding: fill a NaN-initialized array row by row."""
    row_sizes = [len(row) for row in l]
    out = np.full((len(l), max(row_sizes)), np.nan)
    for row_number, (row, size) in enumerate(zip(l, row_sizes)):
        out[row_number, :size] = row
    return out

def box_pandas(l):
    """Let pandas do the padding: DataFrame aligns ragged rows with NaN."""
    frame = pd.DataFrame(l)
    return frame.values

Solution 1:

This seems to be a close one of this question, where the padding was with zeros instead of NaNs. Interesting approaches were posted there, along with mine based on broadcasting and boolean-indexing. So, I would just modify one line from my post there to solve this case like so -

def boolean_indexing(v, fillval=np.nan):
    """Pad an uneven list of lists into a 2D array filled with `fillval`.

    Uses broadcasting to build a boolean mask of valid positions, then
    scatters the concatenated values into a prefilled output.  The dtype
    follows `fillval` (float for NaN, int for an integer fill value).
    Returns an empty (0, 0) array for empty input instead of raising.
    """
    lens = np.array([len(item) for item in v])
    if lens.size == 0:
        # Guard: lens.max() and np.concatenate both raise on empty input.
        return np.empty((0, 0))
    mask = lens[:,None] > np.arange(lens.max())
    out = np.full(mask.shape,fillval)
    out[mask] = np.concatenate(v)
    return out

Sample run -

In [32]: l
Out[32]: [[1, 2, 3], [1, 2], [3, 8, 9, 7, 3]]

In [33]: boolean_indexing(l)
Out[33]: 
array([[  1.,   2.,   3.,  nan,  nan],
       [  1.,   2.,  nan,  nan,  nan],
       [  3.,   8.,   9.,   7.,   3.]])

In [34]: boolean_indexing(l,-1)
Out[34]: 
array([[ 1,  2,  3, -1, -1],
       [ 1,  2, -1, -1, -1],
       [ 3,  8,  9,  7,  3]])

I have posted few runtime results there for all the posted approaches on that Q&A, which could be useful.

Solution 2:

Probably the fastest list version uses itertools.zip_longest (may be izip_longest in Py2):

In [747]: np.array(list(itertools.zip_longest(*ll,fillvalue=np.nan))).T
Out[747]: 
array([[  1.,   2.,   3.],
       [  1.,   2.,  nan]])

Without the array conversion and transpose, zip_longest produces the column tuples:

In [748]: list(itertools.zip_longest(*ll))
Out[748]: [(1, 1), (2, 2), (3, None)]

another zip 'transposes':

In [751]: list(zip(*itertools.zip_longest(*ll)))
Out[751]: [(1, 2, 3), (1, 2, None)]

Often when starting with lists (or even an object array of lists), it is faster to stick with list methods. There's a substantial overhead in creating an array or dataframe.

This isn't the first time this question has been asked.

How can I pad and/or truncate a vector to a specified length using numpy?

My answer there includes both this zip_longest and your box_pir

I think there's also a fast numpy version using a flattened array, but I don't recall the details. It was probably given by Warren or Divakar.

I think the 'flattened' version works something along this line:

In [809]: ll
Out[809]: [[1, 2, 3], [1, 2]]
In [810]: sll=np.hstack(ll)     # all values in a 1d array
In [816]: res=np.empty((2,3)); res.fill(np.nan)  # empty target

get flattened indices where values go. This is the crucial step. Here the use of r_ is iterative; the fast version probably uses cumsum

In [817]: idx=np.r_[0:3, 3:3+2]
In [818]: idx
Out[818]: array([0, 1, 2, 3, 4])
In [819]: res.flat[idx]=sll
In [820]: res
Out[820]: 
array([[  1.,   2.,   3.],
       [  1.,   2.,  nan]])

================

so the missing link is >np.arange() broadcasting

In [897]: lens=np.array([len(i) for i in ll])
In [898]: mask=lens[:,None]>np.arange(lens.max())
In [899]: mask
Out[899]: 
array([[ True,  True,  True],
       [ True,  True, False]], dtype=bool)
In [900]: idx=np.where(mask.ravel())
In [901]: idx
Out[901]: (array([0, 1, 2, 3, 4], dtype=int32),)

Solution 3:

I might write this as a form of slice assignment on each of the sub arrays that have been filled with a default:

def to_numpy(LoL, default=np.nan):
    """Slice-assign each sub-list into a target array pre-filled with `default`."""
    n_rows = len(LoL)
    n_cols = len(max(LoL, key=len))
    # dtype=float matches np.empty's default float64, so an integer `default`
    # still produces a float array exactly like the empty-then-fill original.
    result = np.full((n_rows, n_cols), default, dtype=float)
    for row_index, row in enumerate(LoL):
        result[row_index, :len(row)] = row
    return result

I added Divakar's boolean-indexing approach as f4 to the timing tests. At least in my testing (Python 2.7 and Python 3.5; NumPy 1.11), it is not the fastest.

Timing shows that izip_longest or f2 is slightly faster for most lists but slice assignment (which is f1) is faster for larger lists:

from __future__ import print_function
import numpy as np
try:
    from itertools import izip_longest as zip_longest 
except ImportError:
    from itertools import zip_longest   

def f1(LoL):
    # Slice assignment into a NaN-prefilled target, one row at a time.
    height = len(LoL)
    width = max(len(row) for row in LoL)
    grid = np.full((height, width), np.nan)
    for r, row in enumerate(LoL):
        grid[r, :len(row)] = row
    return grid

def f2(LoL):
    # zip_longest pads the columns with NaN; transposing restores row order.
    transposed = list(zip_longest(*LoL, fillvalue=np.nan))
    return np.array(transposed).T

def f3(LoL):
    # Pad each row in pure Python, then convert the rectangular result once.
    target = max(map(len, LoL))
    return np.array([row + [np.nan] * (target - len(row)) for row in LoL])

def f4(LoL):
    # Divakar's approach: broadcast a validity mask, scatter all values at once.
    sizes = np.array([len(row) for row in LoL])
    keep = np.arange(sizes.max()) < sizes[:, None]
    res = np.full(keep.shape, np.nan)
    res[keep] = np.concatenate(LoL)
    return res

if __name__=='__main__':
    import timeit
    # Benchmark each padding implementation on progressively larger inputs.
    # Every case repeats the same two ragged rows (lengths 20 and 30), so
    # only the number of rows grows, never the row width.
    for case, LoL in (('small', [list(range(20)), list(range(30))] * 1000),
                      ('medium', [list(range(20)), list(range(30))] * 10000),
                      ('big', [list(range(20)), list(range(30))] * 100000),
                      ('huge', [list(range(20)), list(range(30))] * 1000000)):
        print(case)
        for f in (f1, f2, f3, f4):
            # timeit pulls f and LoL from this module's globals; number=100
            # runs each function 100 times and reports total seconds.
            print("   ",f.__name__, timeit.timeit("f(LoL)", setup="from __main__ import f, LoL", number=100) )

Prints:

small
    f1 0.245459079742
    f2 0.209980010986
    f3 0.350691080093
    f4 0.332141160965
medium
    f1 2.45869493484
    f2 2.32307982445
    f3 3.65722203255
    f4 3.55545687675
big
    f1 25.8796288967
    f2 26.6177148819
    f3 41.6916451454
    f4 41.3140149117
huge
    f1 262.429639101
    f2 295.129109859
    f3 427.606887817
    f4 441.810388088

Solution 4:

Maybe something like this? I don't know about your hardware, but it averages about 16 ms per 100 loops for l2 = [list(range(20)), list(range(30))] * 10000.

from numpy import nan


def box(l):
    """Right-pad every sub-list of `l` with NaN up to the longest row length.

    Returns a plain list of lists, not a numpy array.
    """
    width = max(map(len, l))
    return [row + [nan] * (width - len(row)) for row in l]