Server Engineer @ SundayToz Blog https://mingrammer.com Facebook https://facebook.com/mingrammer Github https://github.com/mingrammer Eng Blog https://medium.com/@mingrammer
server ip') shutdown(local_server) clear_dump(port) def main(start, end): ... pool = Pool(processes=8) pool.starmap(run, zip(local_ports, legacy_servers)) Data Migration Process
= r.info('replication')['master_sync_in_progress'] if master_link_status == 'up' and master_sync_in_progress == 0: r.slaveof() break print('[{}] [{}|{}] All keys is fetched.'.format(now(), src, dst)) Data Migration Process (Synchronization with Replication) Sync and check done
= r.info('keyspace') print('[{}] [{}|{}] Started migrating.'.format(now(), src, dst)) jobs = [gevent.spawn(migrate, src, dst, int(k[2:])) for k in keyspace.keys()] gevent.joinall(jobs) print('[{}] [{}|{}] Migration was done.'.format(now(), src, dst)) Data Migration Process (Dump & Restore) Parallelism by keyspace
... while True: # iterate 2,500 keys at a time (was mojibake Korean — TODO confirm original wording) cursor, keys = srcr.scan(cursor, count=count) pipeline = srcr.pipeline(transaction=False) # dump for key in keys: pipeline.pttl(key) pipeline.dump(key) result = pipeline.execute() # restore pipeline = dstr.pipeline(transaction=False) for key, ttl, data in zip(keys, result[::2], result[1::2]): if data != None: pipeline.restore(key, ttl + 10800000 if ttl > 0 else 0, data) pipeline.execute(False) ... if cursor == 0: break Data Migration Process (Dump & Restore) Migrate all keys with pipelining
server ip') shutdown(local_server) clear_dump(port) def main(start, end): ... pool = Pool(processes=8) pool.starmap(run, zip(local_ports, legacy_servers)) Data Migration Process