diff --git a/python/tvm/auto_scheduler/search_policy.py b/python/tvm/auto_scheduler/search_policy.py
index 6f565edbd378..5b15a48943d2 100644
--- a/python/tvm/auto_scheduler/search_policy.py
+++ b/python/tvm/auto_scheduler/search_policy.py
@@ -57,7 +57,7 @@ class PreloadMeasuredStates(SearchCallback):
         The name of the record file.
     """
 
-    def __init__(self, filename="auto_scheduler_tuning.json"):
+    def __init__(self, filename):
         self.__init_handle_by_constructor__(_ffi_api.PreloadMeasuredStates, filename)
 
 
diff --git a/python/tvm/auto_scheduler/task_scheduler.py b/python/tvm/auto_scheduler/task_scheduler.py
index a3dbcae64b60..ab83ff40c461 100644
--- a/python/tvm/auto_scheduler/task_scheduler.py
+++ b/python/tvm/auto_scheduler/task_scheduler.py
@@ -29,7 +29,7 @@
 
 import numpy as np
 
-from .search_policy import SearchPolicy, SketchPolicy
+from .search_policy import SearchPolicy, SketchPolicy, PreloadMeasuredStates
 from .cost_model import RandomModel, XGBModel
 from .utils import array_mean
 from .measure import ProgramMeasurer
@@ -94,8 +94,19 @@ def make_search_policies(
             raise ValueError("Invalid search policy: " + search_policy)
 
         if policy_type == "sketch":
+            if load_log_file:
+                # use the log file to restore the status of search policies.
+                init_search_callbacks = [PreloadMeasuredStates(load_log_file)]
+            else:
+                init_search_callbacks = None
             search_policies = [
-                SketchPolicy(task, cost_model, params=search_policy_params, verbose=verbose)
+                SketchPolicy(
+                    task,
+                    cost_model,
+                    params=search_policy_params,
+                    verbose=verbose,
+                    init_search_callbacks=init_search_callbacks,
+                )
                 for task in tasks
             ]
         else:
diff --git a/tutorials/auto_scheduler/tune_network_cuda.py b/tutorials/auto_scheduler/tune_network_cuda.py
index 03be05abd363..3da9f3fc577b 100644
--- a/tutorials/auto_scheduler/tune_network_cuda.py
+++ b/tutorials/auto_scheduler/tune_network_cuda.py
@@ -299,10 +299,14 @@ def run_tuning():
 # 1. During the tuning, the auto-scheduler needs to compile many programs and
 #    extract feature from them. This part is CPU-intensive,
 #    so a high-performance CPU with many cores is recommended for faster search.
-# 2. If you have multiple target GPUs, you can use all of them for measurements to
+# 2. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill --i log.json`
+#    to distill the large log file and only save the best useful records.
+# 3. You can resume a search from the previous log file. You just need to
+#    add a new argument :code:`load_log_file` when creating the task scheduler
+#    in function :code:`run_tuning`. For example,
+#    :code:`tuner = auto_scheduler.TaskScheduler(tasks, task_weights, load_log_file=log_file)`
+# 4. If you have multiple target GPUs, you can use all of them for measurements to
 #    parallelize the measurements. Check this :ref:`section `
 #    to learn how to use the RPC Tracker and RPC Server.
 #    To use the RPC Tracker in auto-scheduler, replace the runner in :code:`TuningOptions`
 #    with :any:`auto_scheduler.RPCRunner`.
-# 3. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill --i log.json`
-#    to distill the large log file and only save the best useful records.
diff --git a/tutorials/auto_scheduler/tune_network_x86.py b/tutorials/auto_scheduler/tune_network_x86.py
index aba75b253e0c..a491759ab128 100644
--- a/tutorials/auto_scheduler/tune_network_x86.py
+++ b/tutorials/auto_scheduler/tune_network_x86.py
@@ -298,10 +298,14 @@ def run_tuning():
 # 1. During the tuning, the auto-scheduler needs to compile many programs and
 #    extract feature from them. This part is CPU-intensive,
 #    so a high-performance CPU with many cores is recommended for faster search.
-# 2. If you have multiple target CPUs, you can use all of them for measurements to
+# 2. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill --i log.json`
+#    to distill the large log file and only save the best useful records.
+# 3. You can resume a search from the previous log file. You just need to
+#    add a new argument :code:`load_log_file` when creating the task scheduler
+#    in function :code:`run_tuning`. For example,
+#    :code:`tuner = auto_scheduler.TaskScheduler(tasks, task_weights, load_log_file=log_file)`
+# 4. If you have multiple target CPUs, you can use all of them for measurements to
 #    parallelize the measurements. Check this :ref:`section `
 #    to learn how to use the RPC Tracker and RPC Server.
 #    To use the RPC Tracker in auto-scheduler, replace the runner in :code:`TuningOptions`
 #    with :any:`auto_scheduler.RPCRunner`.
-# 3. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill --i log.json`
-#    to distill the large log file and only save the best useful records.
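
For context on the tutorial note added above, a minimal sketch of the resumed-tuning workflow this patch enables, written against the public API the tune_network_* tutorials already use. The names mod, params, target, and log_file and the trial budget are illustrative placeholders, not part of this patch:

    from tvm import auto_scheduler

    # Assumed to exist: a Relay module `mod`, its `params`, and a `target`,
    # obtained exactly as in the tune_network_* tutorials.
    log_file = "network_tuning.json"  # records from an earlier, possibly interrupted run

    tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)

    # Passing load_log_file makes make_search_policies attach a
    # PreloadMeasuredStates callback to every SketchPolicy, so measured
    # states from the previous run are restored before new trials start.
    tuner = auto_scheduler.TaskScheduler(tasks, task_weights, load_log_file=log_file)

    tune_option = auto_scheduler.TuningOptions(
        num_measure_trials=20000,  # illustrative budget
        measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
    )
    tuner.tune(tune_option)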
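The same callback can also be used directly when tuning a single task without the task scheduler. A rough sketch, assuming `task` is an auto_scheduler.SearchTask and `log_file` holds records from a previous run of that task; the exact driver call (e.g. auto_scheduler.auto_schedule or SearchTask.tune) depends on the TVM version:

    from tvm import auto_scheduler

    # Warm up the cost model from the old records.
    cost_model = auto_scheduler.XGBModel()
    cost_model.update_from_file(log_file)

    # The same callback that make_search_policies now wires up automatically.
    search_policy = auto_scheduler.SketchPolicy(
        task,
        cost_model,
        init_search_callbacks=[auto_scheduler.PreloadMeasuredStates(log_file)],
    )
    # `search_policy` is then passed to the usual tuning entry point together
    # with a TuningOptions instance.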