forked from neo-ai/tvm
-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
[Ansor][AutoTVM v2.0] Phase 1: feature extraction for cost models (ap…
…ache#6190) * [AutoScheduler] add feature extraction * fix lint * fix gpu test * address comments * improve flop estimation * rebase * refactor with group * fix * Apply suggestions from code review
- Loading branch information
1 parent
8fd5969
commit 2ddb198
Showing
7 changed files
with
2,226 additions
and
13 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,122 @@ | ||
/* | ||
* Licensed to the Apache Software Foundation (ASF) under one | ||
* or more contributor license agreements. See the NOTICE file | ||
* distributed with this work for additional information | ||
* regarding copyright ownership. The ASF licenses this file | ||
* to you under the Apache License, Version 2.0 (the | ||
* "License"); you may not use this file except in compliance | ||
* with the License. You may obtain a copy of the License at | ||
* | ||
* http://www.apache.org/licenses/LICENSE-2.0 | ||
* | ||
* Unless required by applicable law or agreed to in writing, | ||
* software distributed under the License is distributed on an | ||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY | ||
* KIND, either express or implied. See the License for the | ||
* specific language governing permissions and limitations | ||
* under the License. | ||
*/ | ||
|
||
/*! | ||
* \file auto_scheduler/feature.h | ||
* \brief Feature extraction for the cost model. | ||
* We extract one feature vector per BufferStoreNode statement in a TIR Stmt, | ||
* so we call this feature as "Per Store" feature. | ||
* The cost model also does prediction for each BufferStoreNode statement and aggregates | ||
* the predictions as the whole score for a TVM IR (Stmt). | ||
* | ||
* The feature specification is defined by `src/auto_scheduler/feature.cc:: FeatureSet` | ||
*/ | ||
|
||
#ifndef TVM_AUTO_SCHEDULER_FEATURE_H_ | ||
#define TVM_AUTO_SCHEDULER_FEATURE_H_ | ||
|
||
#include <tvm/auto_scheduler/compute_dag.h> | ||
#include <tvm/auto_scheduler/measure.h> | ||
|
||
#include <string> | ||
#include <vector> | ||
|
||
namespace tvm { | ||
namespace auto_scheduler { | ||
|
||
/*!
 * \brief Get the per-store feature vector from a single lowered TIR Stmt.
 * \param stmt The input lowered TIR statement
 * \param cache_line_size The size of a cache line in bytes
 * \param max_n_bufs The maximum number of extracted buffers for one statement
 * \param ret The returned (flattened) feature vector
 */
void GetPerStoreFeature(const Stmt& stmt, int cache_line_size, int max_n_bufs,
                        std::vector<float>* ret);
|
||
/*!
 * \brief Get the names of elements in the feature vector. Use this for debug and inspection.
 * \param max_n_bufs The maximum number of extracted buffers for one statement
 * \param ret The returned names, one per element of the feature vector.
 */
void GetPerStoreFeatureName(int max_n_bufs, std::vector<std::string>* ret);
|
||
/*!
 * \brief Get per-store features from states that all belong to the same search task.
 * \param states The input states
 * \param task The search task shared by all states
 * \param skip_first_n_feature_extraction Skip feature extraction for the first n states
 * \param max_n_bufs The maximum number of extracted buffers for one statement
 * \param features The returned feature vectors. The innermost vector contains the
 * feature vectors for all BufferStoreNode statements of one state.
 */
void GetPerStoreFeaturesFromStates(const Array<State>& states, const SearchTask& task,
                                   int skip_first_n_feature_extraction, int max_n_bufs,
                                   std::vector<std::vector<float> >* features);
|
||
/*!
 * \brief Get per-store features from states of different tasks.
 * \param states The input states
 * \param tasks The search tasks corresponding one-to-one to the input states
 * \param skip_first_n_feature_extraction Skip feature extraction for the first n states
 * \param max_n_bufs The maximum number of extracted buffers for one statement
 * \param features The returned feature vectors. The innermost vector contains the
 * feature vectors for all BufferStoreNode statements of one state.
 */
void GetPerStoreFeaturesFromStates(const Array<State>& states, const std::vector<SearchTask>& tasks,
                                   int skip_first_n_feature_extraction, int max_n_bufs,
                                   std::vector<std::vector<float> >* features);
|
||
/*!
 * \brief Get per-store features from a log file of measurement records.
 * \param filename The name of the log file
 * \param max_lines Only read the first n lines of the file
 * \param max_n_bufs The maximum number of extracted buffers for one statement
 * \param features The returned feature vectors. The innermost vector contains the
 * feature vectors for all BufferStoreNode statements of one record.
 * \param normalized_throughputs The normalized throughputs for all states
 * \param task_ids The task ids for all states
 */
void GetPerStoreFeaturesFromFile(const std::string& filename, int max_lines, int max_n_bufs,
                                 std::vector<std::vector<float> >* features,
                                 std::vector<float>* normalized_throughputs,
                                 std::vector<int>* task_ids);
|
||
/*!
 * \brief Get per-store features from measurement input/result pairs.
 * \param inputs The measurement inputs
 * \param results The measurement results
 * \param skip_first_n_feature_extraction Skip feature extraction for the first n measurement pairs
 * \param max_n_bufs The maximum number of extracted buffers for one statement
 * \param features The returned feature vectors. The innermost vector contains the
 * feature vectors for all BufferStoreNode statements of one measurement pair.
 * \param normalized_throughputs The normalized throughputs for all states
 * \param task_ids The task ids for all states
 */
void GetPerStoreFeaturesFromMeasurePairs(const Array<MeasureInput>& inputs,
                                         const Array<MeasureResult>& results,
                                         int skip_first_n_feature_extraction, int max_n_bufs,
                                         std::vector<std::vector<float> >* features,
                                         std::vector<float>* normalized_throughputs,
                                         std::vector<int>* task_ids);
|
||
} // namespace auto_scheduler | ||
} // namespace tvm | ||
|
||
#endif // TVM_AUTO_SCHEDULER_FEATURE_H_ |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,242 @@ | ||
# Licensed to the Apache Software Foundation (ASF) under one | ||
# or more contributor license agreements. See the NOTICE file | ||
# distributed with this work for additional information | ||
# regarding copyright ownership. The ASF licenses this file | ||
# to you under the Apache License, Version 2.0 (the | ||
# "License"); you may not use this file except in compliance | ||
# with the License. You may obtain a copy of the License at | ||
# | ||
# http://www.apache.org/licenses/LICENSE-2.0 | ||
# | ||
# Unless required by applicable law or agreed to in writing, | ||
# software distributed under the License is distributed on an | ||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY | ||
# KIND, either express or implied. See the License for the | ||
# specific language governing permissions and limitations | ||
# under the License. | ||
|
||
"""
Python API for feature extraction. The extracted feature vectors are used by cost models.

We extract one feature vector per BufferStoreNode statement in a TIR Stmt,
so we call this feature as "Per Store" feature.
The cost model also does prediction for each BufferStoreNode statement and aggregates
the predicted score of each BufferStoreNode as the score of a TIR Stmt.

The feature specification is defined by `src/auto_scheduler/feature.cc::FeatureSet`
"""
|
||
from typing import List, Tuple, Union, Optional | ||
import struct | ||
|
||
import numpy as np | ||
|
||
from .loop_state import State, StateObject | ||
from .measure import MeasureInput, MeasureResult | ||
from . import _ffi_api | ||
|
||
# The maximum number of extracted buffers for one statement
DEFAULT_MAX_N_BUFS = 5

# The length of the feature vector
DEFAULT_FEATURE_VEC_LEN = 164

# The sizes of int32 and float32 in bytes
SIZE_OF_INT32 = 4
SIZE_OF_FLOAT32 = 4


def unpack_feature(byte_arr: bytearray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Unpack the flattened feature vectors (in byte array format) returned from c++

    Parameters
    ----------
    byte_arr: bytearray
        The two-dimensional feature vectors in serialized byte array format

    Returns
    -------
    features: np.ndarray
        Feature vectors (an object array; one 2-D array per record)
    normalized_throughputs: np.ndarray
        Normalized throughputs
    task_ids: np.ndarray
        Task ids
    """
    # The serialization format for n records is:
    # {
    #   int      n;
    #   int[n+2] sizes
    #
    #   float[sizes[0]]    feature for record 1
    #   float[sizes[1]]    feature for record 2
    #   ...                feature for record i...
    #   float[sizes[n-1]]  feature for record n
    #
    #   float[sizes[n]]    normalized throughput for n records
    #   int[sizes[n+1]]    task id for n records
    # }
    vec_len = DEFAULT_FEATURE_VEC_LEN

    # unpack sizes
    offset = 0
    n = struct.unpack_from("1i", byte_arr, offset=offset)[0]
    offset += SIZE_OF_INT32

    sizes = struct.unpack_from("%di" % (n+2), byte_arr, offset=offset)
    offset += SIZE_OF_INT32 * (n+2)

    # unpack features
    features = []
    for size in sizes[:-2]:
        row = []

        # Unpack the feature for (possibly) multiple statements of one record.
        # The format is:
        # {
        #     int   n_stmts
        #     float[n_stmt][vec_len] feature_vecs
        # }
        # where vec_len can be calculated by `(size - 1) / n_stmts`
        if size == 0:
            # failed during lowering: emit a single all-zero feature vector
            features.append(np.zeros((1, vec_len)))
        else:
            # n_stmts is serialized as a float32; round to the nearest integer
            n_stmts = struct.unpack_from("f", byte_arr, offset=offset)
            offset += SIZE_OF_FLOAT32

            n_stmts = int(n_stmts[0] + 0.5)
            tmp_vec_len = (size - 1) // n_stmts
            assert tmp_vec_len == vec_len, "The length of feature vector is wrong. " \
                                           "Expected %d but got %d." % (vec_len, tmp_vec_len)
            assert tmp_vec_len * n_stmts == size - 1
            for _ in range(n_stmts):
                x = struct.unpack_from("%df" % vec_len, byte_arr, offset=offset)
                offset += vec_len * SIZE_OF_FLOAT32
                row.append(x)

            features.append(np.array(row))

    # unpack normalized_throughputs
    m = sizes[-2]
    normalized_throughputs = struct.unpack_from("%df" % m, byte_arr, offset=offset)
    # NOTE: these are float32 values, so advance by SIZE_OF_FLOAT32
    # (the two sizes happen to be equal, but using the int size here was misleading)
    offset += m * SIZE_OF_FLOAT32

    # unpack task_ids
    m = sizes[-1]
    task_ids = struct.unpack_from("%di" % m, byte_arr, offset=offset)
    offset += m * SIZE_OF_INT32

    assert offset == len(byte_arr), "%d vs %d" % (offset, len(byte_arr))
    return np.array(features, dtype=object), np.array(normalized_throughputs), np.array(task_ids)
|
||
|
||
def get_per_store_features_from_file(filename: str,
                                     max_lines: int,
                                     max_n_bufs: Optional[int] = None) \
        -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Extract per_store feature vectors from a measurement log file.

    Parameters
    ----------
    filename: str
        The input filename
    max_lines: int
        Only extract the first n lines of the file
    max_n_bufs: Optional[int]
        The maximum number of extracted buffers for one statement

    Returns
    -------
    features: np.ndarray
        Feature vectors
    normalized_throughputs: np.ndarray
        Normalized throughputs
    task_ids: np.ndarray
        Task ids
    """
    n_bufs = max_n_bufs if max_n_bufs else DEFAULT_MAX_N_BUFS
    raw_bytes = _ffi_api.GetPerStoreFeaturesFromFile(filename, max_lines, n_bufs)
    return unpack_feature(raw_bytes)
|
||
|
||
def get_per_store_features_from_measure_pairs(inputs: List[MeasureInput],
                                              results: List[MeasureResult],
                                              skip_first_n_feature_extraction: int = 0,
                                              max_n_bufs: Optional[int] = None) \
        -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Extract per_store feature vectors from measurement input/result pairs.

    Parameters
    ----------
    inputs: List[MeasureInput]
        The measure inputs
    results: List[MeasureResult]
        The measure results
    skip_first_n_feature_extraction: int
        Skip feature extraction for the first n states
    max_n_bufs: int
        The maximum number of extracted buffers for one statement

    Returns
    -------
    features: np.ndarray
        Feature vectors
    normalized_throughputs: np.ndarray
        Normalized throughputs
    task_ids: np.ndarray
        Task ids
    """
    n_bufs = max_n_bufs if max_n_bufs else DEFAULT_MAX_N_BUFS
    raw_bytes = _ffi_api.GetPerStoreFeaturesFromMeasurePairs(
        inputs, results, skip_first_n_feature_extraction, n_bufs)
    return unpack_feature(raw_bytes)
|
||
|
||
def get_per_store_features_from_states(states: List[Union[State, StateObject]],
                                       task: "SearchTask",
                                       max_n_bufs: Optional[int] = None) -> List[np.ndarray]:
    """Get per_store features from states of the same search task

    Parameters
    ----------
    states: List[Union[State, StateObject]]
        The input states
    task: SearchTask
        The search task of the input states
    max_n_bufs: Optional[int]
        The maximum number of extracted buffers for one statement

    Returns
    -------
    features: List[np.ndarray]
        Feature vectors. The i-th element contains the feature vectors of all
        BufferStoreNode statements in the i-th state.

    Raises
    ------
    ValueError
        If the elements of `states` are neither State nor StateObject.
    """
    if not states:
        # nothing to extract; avoids IndexError on states[0] below
        return []
    if isinstance(states[0], State):
        state_objects = [s.state_object for s in states]
    elif isinstance(states[0], StateObject):
        state_objects = states
    else:
        # previously fell through with `state_objects` unbound -> NameError
        raise ValueError("The elements of states must be State or StateObject, "
                         "but got %s" % type(states[0]))
    byte_arr = _ffi_api.GetPerStoreFeaturesFromStates(
        state_objects, task, max_n_bufs or DEFAULT_MAX_N_BUFS)
    return unpack_feature(byte_arr)[0]
|
||
|
||
def get_per_store_feature_names(max_n_bufs: Optional[int] = None) -> List[str]:
    """Return the name of every element in the feature vector.
    Use this for debug and inspection.

    Parameters
    ----------
    max_n_bufs: int
        The maximum number of extracted buffers for one statement

    Returns
    -------
    names: List[str]
        The names of elements in the flatten feature vector
    """
    n_bufs = max_n_bufs if max_n_bufs else DEFAULT_MAX_N_BUFS
    return _ffi_api.GetPerStoreFeatureNames(n_bufs)
Oops, something went wrong.