timeit versus timing decorator
I'm trying to time some code. First I used a timing decorator:
#!/usr/bin/env python
import time
from itertools import izip
from random import shuffle

def timing_val(func):
    def wrapper(*arg, **kw):
        '''source: http://www.daniweb.com/code/snippet368.html'''
        t1 = time.time()
        res = func(*arg, **kw)
        t2 = time.time()
        return (t2 - t1), res, func.__name__
    return wrapper

@timing_val
def time_izip(alist, n):
    i = iter(alist)
    return [x for x in izip(*[i] * n)]

@timing_val
def time_indexing(alist, n):
    return [alist[i:i + n] for i in range(0, len(alist), n)]

func_list = [locals()[key] for key in locals().keys()
             if callable(locals()[key]) and key.startswith('time')]
shuffle(func_list)  # Shuffle, just in case the order matters
alist = range(1000000)
times = []
for f in func_list:
    times.append(f(alist, 31))
times.sort(key=lambda x: x[0])
for (time, result, func_name) in times:
    print '%s took %0.3fms.' % (func_name, time * 1000.)
yields
% test.py
time_indexing took 73.230ms.
time_izip took 122.057ms.
And here I use timeit:
% python -m timeit -s '' 'alist=range(1000000);[alist[i:i+31] for i in range(0, len(alist), 31)]'
10 loops, best of 3: 64 msec per loop
% python -m timeit -s 'from itertools import izip' 'alist=range(1000000);i=iter(alist);[x for x in izip(*[i]*31)]'
10 loops, best of 3: 66.5 msec per loop
With timeit the results are virtually the same, but with the timing decorator time_indexing appears to be faster than time_izip.
What accounts for this difference? Should either method be believed? If so, which?
Use wraps from functools to improve Matt Alcock's answer.
from functools import wraps
from time import time

def timing(f):
    @wraps(f)
    def wrap(*args, **kw):
        ts = time()
        result = f(*args, **kw)
        te = time()
        print 'func:%r args:[%r, %r] took: %2.4f sec' % \
              (f.__name__, args, kw, te - ts)
        return result
    return wrap
In an example:
@timing
def f(a):
    for _ in range(a):
        i = 0
    return -1
Invoking method f wrapped with @timing:

f(100000000)

prints:

func:'f' args:[(100000000,), {}] took: 14.2240 sec
The advantage of this is that it preserves metadata of the original function, so the name and docstring of the wrapped function remain correct.
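A quick check illustrates what @wraps preserves; cube here is just a hypothetical example function decorated with the timing decorator above:

@timing
def cube(x):
    '''Return the cube of x.'''
    return x ** 3

print cube.__name__  # 'cube' (would be 'wrap' without @wraps)
print cube.__doc__   # 'Return the cube of x.' (would be None without @wraps)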
I would use a timing decorator, because you can use decorator syntax to sprinkle the timing around your code rather than cluttering your code with timing logic.
import time

def timeit(f):
    def timed(*args, **kw):
        ts = time.time()
        result = f(*args, **kw)
        te = time.time()
        print 'func:%r args:[%r, %r] took: %2.4f sec' % \
              (f.__name__, args, kw, te - ts)
        return result
    return timed
Using the decorator is easy: either apply it with the @ syntax:
@timeit
def compute_magic(n):
    # function definition
    # ...
    pass
Or re-alias the function you want to time.
compute_magic = timeit(compute_magic)
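The same re-aliasing trick also works for callables you did not define yourself; a small sketch (timed_range is just a hypothetical name, and the printed timing is illustrative):

timed_range = timeit(range)
numbers = timed_range(1000000)
# prints something like: func:'range' args:[(1000000,), {}] took: 0.0150 sec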
Use timeit. Running the test more than once gives me much better results.
func_list = [locals()[key] for key in locals().keys()
             if callable(locals()[key]) and key.startswith('time')]
alist = range(1000000)
times = []
for f in func_list:
    n = 10
    times.append(min(t for t, _, _ in (f(alist, 31) for i in range(n))))

for (time, func_name) in zip(times, func_list):
    print '%s took %0.3fms.' % (func_name, time * 1000.)
which prints:
<function wrapper at 0x01FCB5F0> took 39.000ms.
<function wrapper at 0x01FCB670> took 41.000ms.
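For reference, roughly the same comparison can be driven from inside a script with timeit.repeat, which repeats each statement and keeps the best run; the setup string and the repeat/number values below are only illustrative, and building the list is moved into setup so that only the grouping itself is timed:

import timeit

setup = 'from itertools import izip; alist = range(1000000)'
statements = {
    'izip': 'i = iter(alist); [x for x in izip(*[i] * 31)]',
    'indexing': '[alist[i:i + 31] for i in range(0, len(alist), 31)]',
}
for name, stmt in statements.items():
    # Best of 3 repeats, 10 executions each, matching the command-line runs.
    best = min(timeit.repeat(stmt, setup=setup, repeat=3, number=10)) / 10
    print '%s: %.1f msec per loop' % (name, best * 1000)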