if args.l_cache:
    # Cache line-detection results, keyed by the input file name (extension
    # stripped) and the image width, so repeated runs skip linef.find_lines.
    filename = ("saved/cache/" + args.files[0][:-4] + "_" +
                str(image.size[0]))
    # Directory part of the cache path (everything before the last '/').
    cache_dir = "/".join(filename.split('/')[:-1])
    if os.path.exists(filename):
        # NOTE(review): unpickling a cache file is unsafe on untrusted input;
        # acceptable here only because the cache is produced locally below.
        # Open in binary mode to match the 'wb' used when the cache is written,
        # and use `with` so the handle is always closed.
        with open(filename, 'rb') as d_file:
            lines, l1, l2, bounds, hough = pickle.load(d_file)
        # py2/py3-compatible replacement for `print >> sys.stderr, ...`.
        sys.stderr.write("using cached results\n")
        # No Hough image is available when loading from cache.
        im_h = None
    else:
        lines, l1, l2, bounds, hough, im_h = linef.find_lines(image, show_all,
                                                              do_something,
                                                              verbose)
        # Create the cache directory on first use.
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        # `with` guarantees the file is flushed and closed even on error.
        with open(filename, 'wb') as d_file:
            pickle.dump((lines, l1, l2, bounds, hough), d_file)
import multiprocessing
from functools import partial
def particle(dimension, bound, v_max, func_d):
    """Create one randomly initialized PSO particle.

    Parameters:
        dimension -- number of coordinates of the search space.
        bound -- positions are drawn uniformly from [-bound, bound].
        v_max -- velocities are drawn uniformly from [-v_max, v_max], so the
            velocity cap applies from the very first move.
        func_d -- objective function, called as func_d(*position).

    Returns a 5-tuple (best_value, best_position, velocity, value, position);
    at creation time the particle's best-so-far equals its current state.
    """
    # `range` (not `xrange`) keeps this working on both Python 2 and 3.
    position = [2 * bound * random.random() - bound for _ in range(dimension)]
    velocity = [2 * v_max * random.random() - v_max for _ in range(dimension)]
    value = func_d(*position)
    return value, position, velocity, value, position
def optimize(dimension, boundary, function_d, n_parts, n_turns):
pool = multiprocessing.Pool(None)
- particles = [particle(dimension, boundary, function_d)
+ v_max = 30.
+ particles = [particle(dimension, boundary, v_max, function_d)
for _ in xrange(n_parts)]
gl_best = max(particles)
for _ in xrange(n_turns):
- move_p = partial(move, omega=0.9, phi_p=0.9, phi_g=0.2, v_max=20.,
+ move_p = partial(move,
+ omega=0.98, phi_p=2.75, phi_g=3., v_max=v_max,
global_best=gl_best[1], func_d=function_d)
particles = pool.map(move_p, particles)
gl_best = max(max(particles), gl_best)