Skip to content

Commit

Permalink
[AutoScheduler] Add tips on resuming the search from a log file (apache#7039)
Browse files Browse the repository at this point in the history

* [AutoScheduler] Add tips on resuming the search from a log file

* Trigger CI
  • Loading branch information
merrymercy authored and trevor-m committed Jan 21, 2021
1 parent 37f78bd commit 49c8874
Show file tree
Hide file tree
Showing 4 changed files with 28 additions and 9 deletions.
2 changes: 1 addition & 1 deletion python/tvm/auto_scheduler/search_policy.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ class PreloadMeasuredStates(SearchCallback):
The name of the record file.
"""

def __init__(self, filename="auto_scheduler_tuning.json"):
def __init__(self, filename):
self.__init_handle_by_constructor__(_ffi_api.PreloadMeasuredStates, filename)


Expand Down
15 changes: 13 additions & 2 deletions python/tvm/auto_scheduler/task_scheduler.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@

import numpy as np

from .search_policy import SearchPolicy, SketchPolicy
from .search_policy import SearchPolicy, SketchPolicy, PreloadMeasuredStates
from .cost_model import RandomModel, XGBModel
from .utils import array_mean
from .measure import ProgramMeasurer
Expand Down Expand Up @@ -94,8 +94,19 @@ def make_search_policies(
raise ValueError("Invalid search policy: " + search_policy)

if policy_type == "sketch":
if load_log_file:
# use the log file to restore the status of search policies.
init_search_callbacks = [PreloadMeasuredStates(load_log_file)]
else:
init_search_callbacks = None
search_policies = [
SketchPolicy(task, cost_model, params=search_policy_params, verbose=verbose)
SketchPolicy(
task,
cost_model,
params=search_policy_params,
verbose=verbose,
init_search_callbacks=init_search_callbacks,
)
for task in tasks
]
else:
Expand Down
10 changes: 7 additions & 3 deletions tutorials/auto_scheduler/tune_network_cuda.py
Original file line number Diff line number Diff line change
Expand Up @@ -299,10 +299,14 @@ def run_tuning():
# 1. During the tuning, the auto-scheduler needs to compile many programs and
# extract feature from them. This part is CPU-intensive,
# so a high-performance CPU with many cores is recommended for faster search.
# 2. If you have multiple target GPUs, you can use all of them for measurements to
# 2. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill --i log.json`
# to distill the large log file and only save the best useful records.
# 3. You can resume a search from the previous log file. You just need to
# add a new argument :code:`load_log_file` when creating the task scheduler
# in function :code:`run_tuning`. Say,
# :code:`tuner = auto_scheduler.TaskScheduler(tasks, task_weights, load_log_file=log_file)`
# 4. If you have multiple target GPUs, you can use all of them for measurements to
# parallelize the measurements. Check this :ref:`section <tutorials-autotvm-rpc-tracker>`
# to learn how to use the RPC Tracker and RPC Server.
# To use the RPC Tracker in auto-scheduler, replace the runner in :code:`TuningOptions`
# with :any:`auto_scheduler.RPCRunner`.
# 3. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill --i log.json`
# to distill the large log file and only save the best useful records.
10 changes: 7 additions & 3 deletions tutorials/auto_scheduler/tune_network_x86.py
Original file line number Diff line number Diff line change
Expand Up @@ -298,10 +298,14 @@ def run_tuning():
# 1. During the tuning, the auto-scheduler needs to compile many programs and
# extract feature from them. This part is CPU-intensive,
# so a high-performance CPU with many cores is recommended for faster search.
# 2. If you have multiple target CPUs, you can use all of them for measurements to
# 2. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill --i log.json`
# to distill the large log file and only save the best useful records.
# 3. You can resume a search from the previous log file. You just need to
# add a new argument :code:`load_log_file` when creating the task scheduler
# in function :code:`run_tuning`. Say,
# :code:`tuner = auto_scheduler.TaskScheduler(tasks, task_weights, load_log_file=log_file)`
# 4. If you have multiple target CPUs, you can use all of them for measurements to
# parallelize the measurements. Check this :ref:`section <tutorials-autotvm-rpc-tracker>`
# to learn how to use the RPC Tracker and RPC Server.
# To use the RPC Tracker in auto-scheduler, replace the runner in :code:`TuningOptions`
# with :any:`auto_scheduler.RPCRunner`.
# 3. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill --i log.json`
# to distill the large log file and only save the best useful records.

0 comments on commit 49c8874

Please sign in to comment.