Fixed the code spelling errors.

caojiewen 2021-03-23 11:13:02 +08:00
parent 12a29ce040
commit cad462902a
62 changed files with 98 additions and 98 deletions

View File

@@ -16,7 +16,7 @@
 echo "=============================================================================================================="
-echo "Please run the scipt as: "
+echo "Please run the script as: "
 echo "sh run_distribute_eval.sh DEVICE_NUM RANK_TABLE_FILE DATASET CKPT_PATH"
 echo "for example: sh run_eval.sh [RANK_TABLE_FILE] /path/to/dataset /path/to/ckpt device_id"
 echo "It is better to use absolute path."

View File

@@ -76,7 +76,7 @@ if __name__ == "__main__":
         model = Model(network, loss_fn=loss, metrics={'top_1_accuracy', 'top_5_accuracy'})
     else:
-        raise ValueError("Unsupport dataset.")
+        raise ValueError("Unsupported dataset.")
     if ds_eval.get_dataset_size() == 0:
         raise ValueError("Please check dataset size > 0 and batch_size <= dataset size")

View File

@@ -64,7 +64,7 @@ if __name__ == "__main__":
     elif args.dataset_name == "imagenet":
         cfg = alexnet_imagenet_cfg
     else:
-        raise ValueError("Unsupport dataset.")
+        raise ValueError("Unsupported dataset.")
     device_target = args.device_target
     context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
@@ -92,7 +92,7 @@ if __name__ == "__main__":
     elif args.dataset_name == "imagenet":
         ds_train = create_dataset_imagenet(args.data_path, cfg.batch_size)
     else:
-        raise ValueError("Unsupport dataset.")
+        raise ValueError("Unsupported dataset.")
     if ds_train.get_dataset_size() == 0:
         raise ValueError("Please check dataset size > 0 and batch_size <= dataset size")
@@ -124,7 +124,7 @@ if __name__ == "__main__":
         loss_scale_manager = FixedLossScaleManager(cfg.loss_scale, drop_overflow_update=False)
     else:
-        raise ValueError("Unsupport dataset.")
+        raise ValueError("Unsupported dataset.")
     if device_target == "Ascend":
         model = Model(network, loss_fn=loss, optimizer=opt, metrics=metrics, amp_level="O2", keep_batchnorm_fp32=False,

View File

@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-"""weight initilization"""
+"""weight initialization"""
 import math
 import numpy as np

View File

@@ -35,21 +35,21 @@ using cv::Point;
 namespace py = pybind11;
 namespace pse_adaptor {
-    void get_kernals(const int *data, vector<int64> data_shape, vector<Mat> *kernals) {
+    void get_kernels(const int *data, vector<int64> data_shape, vector<Mat> *kernels) {
         for (int i = 0; i < data_shape[0]; ++i) {
-            Mat kernal = Mat::zeros(data_shape[1], data_shape[2], CV_8UC1);
-            for (int x = 0; x < kernal.rows; ++x) {
-                for (int y = 0; y < kernal.cols; ++y) {
-                    kernal.at<char>(x, y) = data[i * data_shape[1] * data_shape[2] + x * data_shape[2] + y];
+            Mat kernel = Mat::zeros(data_shape[1], data_shape[2], CV_8UC1);
+            for (int x = 0; x < kernel.rows; ++x) {
+                for (int y = 0; y < kernel.cols; ++y) {
+                    kernel.at<char>(x, y) = data[i * data_shape[1] * data_shape[2] + x * data_shape[2] + y];
                 }
             }
-            kernals->emplace_back(kernal);
+            kernels->emplace_back(kernel);
         }
     }
-    void growing_text_line(const vector<Mat> &kernals, vector<vector<int>> *text_line, float min_area) {
+    void growing_text_line(const vector<Mat> &kernels, vector<vector<int>> *text_line, float min_area) {
         Mat label_mat;
-        int label_num = connectedComponents(kernals[kernals.size() - 1], label_mat, 4);
+        int label_num = connectedComponents(kernels[kernels.size() - 1], label_mat, 4);
         vector<int> area(label_num + 1, 0);
         for (int x = 0; x < label_mat.rows; ++x) {
             for (int y = 0; y < label_mat.cols; ++y) {
@@ -76,7 +76,7 @@ namespace pse_adaptor {
         int dx[] = {-1, 1, 0, 0};
         int dy[] = {0, 0, -1, 1};
-        for (int kernal_id = kernals.size() - 2; kernal_id >= 0; --kernal_id) {
+        for (int kernel_id = kernels.size() - 2; kernel_id >= 0; --kernel_id) {
             while (!queue.empty()) {
                 Point point = queue.front();
                 queue.pop();
@@ -90,7 +90,7 @@ namespace pse_adaptor {
                 if (tmp_x < 0 || tmp_x >= static_cast<int>(text_line->size())) continue;
                 if (tmp_y < 0 || tmp_y >= static_cast<int>(text_line->at(1).size())) continue;
-                if (kernals[kernal_id].at<char>(tmp_x, tmp_y) == 0) continue;
+                if (kernels[kernel_id].at<char>(tmp_x, tmp_y) == 0) continue;
                 if (text_line->at(tmp_x)[tmp_y] > 0) continue;
                 Point point_tmp(tmp_x, tmp_y);
@@ -110,10 +110,10 @@ namespace pse_adaptor {
     vector<vector<int>> pse(py::array_t<int, py::array::c_style | py::array::forcecast> quad_n9, float min_area) {
         auto buf = quad_n9.request();
         auto data = static_cast<int *>(buf.ptr);
-        vector<Mat> kernals;
-        get_kernals(data, buf.shape, &kernals);
+        vector<Mat> kernels;
+        get_kernels(data, buf.shape, &kernels);
         vector<vector<int>> text_line;
-        growing_text_line(kernals, &text_line, min_area);
+        growing_text_line(kernels, &text_line, min_area);
         return text_line;
     }
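For context, a minimal sketch of how the `pse` entry point of this pybind11 adaptor might be called from Python after the rename. The module name `adaptor` and the test data are assumptions, not part of the commit; the kernel ordering follows `growing_text_line`, which seeds labels from the last map in the stack.

```python
# Hypothetical usage of the pse() binding above; the module name "adaptor"
# is an assumption (build the pybind11 extension first).
import numpy as np
from adaptor import pse

# Stack of binary kernel maps, shape (num_kernels, H, W), int32.
# growing_text_line() seeds connected components from the LAST map,
# so the smallest (most shrunk) kernel goes last.
kernels = np.zeros((3, 32, 32), dtype=np.int32)
kernels[:, 10:20, 10:20] = 1  # one synthetic text region in every map

labels = np.array(pse(kernels, 5.0))  # min_area=5.0 drops tiny components
print(labels.shape)  # (32, 32): per-pixel text-line labels
```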

View File

@@ -15,7 +15,7 @@
 # ============================================================================
 echo "=============================================================================================================="
-echo "Please run the scipt as: "
+echo "Please run the script as: "
 echo "sh run_distribute_train.sh DEVICE_NUM EPOCH_SIZE LR DATASET RANK_TABLE_FILE PRE_TRAINED PRE_TRAINED_EPOCH_SIZE"
 echo "for example: sh run_distribute_train.sh 8 500 0.2 coco /data/hccl.json /opt/ssd-300.ckpt(optional) 200(optional)"
 echo "It is better to use absolute path."
@@ -33,7 +33,7 @@ BASE_PATH=$(cd "`dirname $0`" || exit; pwd)
 cd $BASE_PATH/../ || exit
 python train.py --only_create_dataset=True --dataset=$4
-echo "After running the scipt, the network runs in the background. The log will be generated in LOGx/log.txt"
+echo "After running the script, the network runs in the background. The log will be generated in LOGx/log.txt"
 export RANK_SIZE=$1
 EPOCH_SIZE=$2

View File

@@ -15,7 +15,7 @@
 # ============================================================================
 echo "=============================================================================================================="
-echo "Please run the scipt as: "
+echo "Please run the script as: "
 echo "sh run_distribute_train_gpu.sh DEVICE_NUM EPOCH_SIZE LR DATASET PRE_TRAINED PRE_TRAINED_EPOCH_SIZE"
 echo "for example: sh run_distribute_train_gpu.sh 8 500 0.2 coco /opt/ssd-300.ckpt(optional) 200(optional)"
 echo "It is better to use absolute path."
@@ -33,7 +33,7 @@ BASE_PATH=$(cd "`dirname $0`" || exit; pwd)
 cd $BASE_PATH/../ || exit
 python train.py --only_create_dataset=True --run_platform="GPU" --dataset=$4
-echo "After running the scipt, the network runs in the background. The log will be generated in LOG/log.txt"
+echo "After running the script, the network runs in the background. The log will be generated in LOG/log.txt"
 export RANK_SIZE=$1
 EPOCH_SIZE=$2

View File

@@ -57,7 +57,7 @@ cp ./*.py ./eval$3
 cp -r ./src ./eval$3
 cd ./eval$3 || exit
 env > env.log
-echo "start infering for device $DEVICE_ID"
+echo "start inferring for device $DEVICE_ID"
 python eval.py \
     --dataset=$DATASET \
     --checkpoint_path=$CHECKPOINT_PATH \

View File

@@ -57,7 +57,7 @@ cp ./*.py ./eval$3
 cp -r ./src ./eval$3
 cd ./eval$3 || exit
 env > env.log
-echo "start infering for device $DEVICE_ID"
+echo "start inferring for device $DEVICE_ID"
 python eval.py \
     --dataset=$DATASET \
     --checkpoint_path=$CHECKPOINT_PATH \

View File

@@ -15,7 +15,7 @@
 # ============================================================================
 echo "=============================================================================================================="
-echo "Please run the scipt as: "
+echo "Please run the script as: "
 echo "bash run_distribute_pretrain.sh DEVICE_NUM EPOCH_SIZE DATA_DIR SCHEMA_DIR RANK_TABLE_FILE"
 echo "for example: bash run_distribute_pretrain.sh 8 1 /path/zh-wiki/ /path/Schema.json /path/hccl.json"
 echo "It is better to use absolute path."

View File

@@ -15,7 +15,7 @@
 # ============================================================================
 echo "=============================================================================================================="
-echo "Please run the scipt as: "
+echo "Please run the script as: "
 echo "bash run_standalone_pretrain.sh DEVICE_ID EPOCH_SIZE DATA_DIR SCHEMA_DIR"
 echo "for example: bash run_standalone_pretrain.sh 0 40 /path/zh-wiki/ /path/Schema.json"
 echo "=============================================================================================================="

View File

@@ -585,7 +585,7 @@ class Model:
                 returned and passed to the network. Otherwise, a tuple (data, label) should
                 be returned, and the data and label are passed to the network and loss
                 function respectively.
-            callbacks (list): List of callback object. Callbacks which should be excuted while training. Default: None.
+            callbacks (list): List of callback object. Callbacks which should be executed while training. Default: None.
             dataset_sink_mode (bool): Determines whether to pass the data through dataset channel. Default: True.
                 Configure pynative mode, the training process will be performed with
                 dataset not sink.
@@ -704,7 +704,7 @@ class Model:
         Args:
             valid_dataset (Dataset): Dataset to evaluate the model.
-            callbacks (list): List of callback object. Callbacks which should be excuted
+            callbacks (list): List of callback object. Callbacks which should be executed
                 while training. Default: None.
             dataset_sink_mode (bool): Determines whether to pass the data through dataset channel. Default: True.
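Both hunks correct the same word in the `callbacks` docstrings; a minimal sketch of those arguments in use, assuming a `model`, `ds_train`, and `ds_eval` built as in the other files touched by this commit:

```python
# Sketch only: callbacks to be executed during training and evaluation.
from mindspore.train.callback import LossMonitor, ModelCheckpoint

cbs = [LossMonitor(per_print_times=100), ModelCheckpoint(prefix="net")]
model.train(epoch=10, train_dataset=ds_train, callbacks=cbs,
            dataset_sink_mode=True)
acc = model.eval(ds_eval, callbacks=cbs, dataset_sink_mode=True)
```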

View File

@@ -65,7 +65,7 @@ def make_directory(path: str):
     """Make directory."""
     if path is None or not isinstance(path, str) or path.strip() == "":
         logger.error("The path(%r) is invalid type.", path)
-        raise TypeError("Input path is invaild type")
+        raise TypeError("Input path is invalid type")
     # convert the relative paths
     path = os.path.realpath(path)

View File

@@ -25,9 +25,9 @@
 # [FastText](#contents)
 FastText is a fast text classification algorithm, which is simple and efficient. It was proposed by Armand
-Joulin, Tomas Mikolov etc. in the artical "Bag of Tricks for Efficient Text Classification" in 2016. It is similar to
+Joulin, Tomas Mikolov etc. in the article "Bag of Tricks for Efficient Text Classification" in 2016. It is similar to
 CBOW in model architecture, where the middle word is replace by a label. FastText adopts ngram feature as addition feature
-to get some information about words. It speeds up training and testing while maintaining high percision, and widly used
+to get some information about words. It speeds up training and testing while maintaining high precision, and widely used
 in various tasks of text classification.
 [Paper](https://arxiv.org/pdf/1607.01759.pdf): "Bag of Tricks for Efficient Text Classification", 2016, A. Joulin, E. Grave, P. Bojanowski, and T. Mikolov
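The bag-of-ngrams idea described above is easy to make concrete. A toy sketch, not the model-zoo pipeline: words plus word bigrams are hashed into a fixed feature space.

```python
# Toy illustration of FastText-style features: unigrams plus bigrams,
# hashed into a fixed number of buckets (bucket count is illustrative).
def ngram_features(tokens, buckets=2 ** 20):
    feats = [hash(t) % buckets for t in tokens]
    feats += [hash(a + " " + b) % buckets for a, b in zip(tokens, tokens[1:])]
    return feats

print(ngram_features("bag of tricks for text classification".split()))
```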

View File

@@ -14,7 +14,7 @@
 # limitations under the License.
 # ============================================================================
 echo "=============================================================================================================="
-echo "Please run the scipt as: "
+echo "Please run the script as: "
 echo "sh create_dataset.sh SOURCE_DATASET_PATH DATASET_NAME"
 echo "for example: sh create_dataset.sh /home/workspace/ag_news_csv ag"
 echo "DATASET_NAME including ag, dbpedia, and yelp_p"

View File

@@ -15,7 +15,7 @@
 # ============================================================================
 echo "=============================================================================================================="
-echo "Please run the scipt as: "
+echo "Please run the script as: "
 echo "sh run_distributed_train.sh DATASET_PATH RANK_TABLE_PATH"
 echo "for example: sh run_distributed_train.sh /home/workspace/ag /home/workspace/rank_table_file.json"
 echo "It is better to use absolute path."

View File

@@ -14,7 +14,7 @@
 # limitations under the License.
 # ============================================================================
 echo "=============================================================================================================="
-echo "Please run the scipt as: "
+echo "Please run the script as: "
 echo "sh run_eval.sh DATASET_PATH DATASET_NAME MODEL_CKPT"
 echo "for example: sh run_eval.sh /home/workspace/ag/test*.mindrecord ag device0/ckpt0/fasttext-5-118.ckpt"
 echo "It is better to use absolute path."

View File

@@ -14,7 +14,7 @@
 # limitations under the License.
 # ============================================================================
 echo "=============================================================================================================="
-echo "Please run the scipt as: "
+echo "Please run the script as: "
 echo "sh run_standalone_train.sh DATASET_PATH"
 echo "for example: sh run_standalone_train.sh /home/workspace/ag"
 echo "It is better to use absolute path."

View File

@@ -62,7 +62,7 @@ class FastText(nn.Cell):
         embeding = self.realdiv(embeding, src_token_length)
         embeding = self.cast(embeding, mstype.float16)
-        classifer = self.fc(embeding)
-        classifer = self.cast(classifer, mstype.float32)
+        classifier = self.fc(embeding)
+        classifier = self.cast(classifier, mstype.float32)
-        return classifer
+        return classifier

View File

@@ -184,7 +184,7 @@ def train_paralle(input_file_path):
         input_file_path: preprocessed dataset path
     """
     set_parallel_env()
-    print("Starting traning on mutiple devices. |~ _ ~| |~ _ ~| |~ _ ~| |~ _ ~|")
+    print("Starting training on multiple devices. |~ _ ~| |~ _ ~| |~ _ ~| |~ _ ~|")
     preprocessed_data = load_dataset(dataset_path=input_file_path,
                                      batch_size=config.batch_size,
                                      epoch_count=config.epoch_count,

View File

@@ -15,7 +15,7 @@
 # ============================================================================
 echo "=============================================================================================================="
-echo "Please run the scipt as: "
+echo "Please run the script as: "
 echo "bash script/pre_process.sh \"INPUT_GLOB\" DATASET_TYPE OUTPUT_FILE"
 echo "for example: bash script/pre_process.sh \"dataset/*.output\" openwebtext ./output/openwebtext.mindrecord"
 echo "=============================================================================================================="

View File

@@ -15,7 +15,7 @@
 # ============================================================================
 echo "=============================================================================================================="
-echo "Please run the scipt as: "
+echo "Please run the script as: "
 echo "bash run_distributed_pretrain_ascend.sh DATA_DIR RANK_TABLE_FILE DEVICE_NUM"
 echo "for example: bash run_distributed_pretrain_ascend.sh /path/dataset /path/hccl.json 8"
 echo "It is better to use absolute path."

View File

@@ -15,7 +15,7 @@
 # ============================================================================
 echo "=============================================================================================================="
-echo "Please run the scipt as: "
+echo "Please run the script as: "
 echo "bash scripts/run_evaluation.sh TASK_TYPE CKPT_PATH DATA_PATH METRICS"
 echo "for example: bash scripts/run_evaluation.sh lambada /your/ckpt /your/data acc"
 echo "=============================================================================================================="

View File

@@ -15,7 +15,7 @@
 # ============================================================================
 echo "=============================================================================================================="
-echo "Please run the scipt as: "
+echo "Please run the script as: "
 echo "bash run_standalone_pretrain_ascend.sh DEVICE_ID EPOCH_SIZE DATA_DIR"
 echo "for example: bash run_standalone_pretrain_ascend.sh 0 40 /path/zh-wiki/"
 echo "=============================================================================================================="

View File

@@ -14,7 +14,7 @@
 # ============================================================================
 """
-Create dataset for training and evaluting
+Create dataset for training and evaluating
 """
 import os

View File

@@ -48,7 +48,7 @@ Note that you can run the scripts based on the dataset mentioned in original pap
 # [Quick Start](#contents)
-- runing on Ascend
+- running on Ascend
 ```bash
 # run training example
@@ -58,7 +58,7 @@ Note that you can run the scripts based on the dataset mentioned in original pap
 bash run_eval_ascend.sh 0 ./preprocess lstm-20_390.ckpt
 ```
-- runing on GPU
+- running on GPU
 ```bash
 # run training example
@@ -68,7 +68,7 @@ Note that you can run the scripts based on the dataset mentioned in original pap
 bash run_eval_gpu.sh 0 ./aclimdb ./glove_dir lstm-20_390.ckpt
 ```
-- runing on CPU
+- running on CPU
 ```bash
 # run training example
@@ -200,7 +200,7 @@ Ascend:
 - Set options in `config.py`, including learning rate and network hyperparameters.
-- runing on Ascend
+- running on Ascend
 Run `sh run_train_ascend.sh` for training.
@@ -217,7 +217,7 @@ Ascend:
 ...
 ```
-- runing on GPU
+- running on GPU
 Run `sh run_train_gpu.sh` for training.
@@ -234,7 +234,7 @@ Ascend:
 ...
 ```
-- runing on CPU
+- running on CPU
 Run `sh run_train_cpu.sh` for training.

View File

@@ -349,7 +349,7 @@ GPU:
 sh run_gpu.sh [--options]
 ```
-The usage of `run_ascend.sh` is shown as bellow:
+The usage of `run_ascend.sh` is shown below:
 ```text
 Usage: run_ascend.sh [-h, --help] [-t, --task <CHAR>] [-n, --device_num <N>]
@@ -371,7 +371,7 @@ options:
 Notes: Be sure to assign the hccl_json file while running a distributed-training.
-The usage of `run_gpu.sh` is shown as bellow:
+The usage of `run_gpu.sh` is shown below:
 ```text
 Usage: run_gpu.sh [-h, --help] [-t, --task <CHAR>] [-n, --device_num <N>]

View File

@@ -54,7 +54,7 @@ def get_rouge_score(result, vocab):
             "target", "prediction" and "prediction_prob".
         Dictionary, dict instance.
-    retur:
+    return:
         Str, rouge score.
     """

View File

@@ -340,7 +340,7 @@ GPU:
 sh run_gpu.sh [--options]
 ```
-The usage of `run_ascend.sh` is shown as bellow:
+The usage of `run_ascend.sh` is shown below:
 ```text
 Usage: run_ascend.sh [-h, --help] [-t, --task <CHAR>] [-n, --device_num <N>]
@@ -362,7 +362,7 @@ options:
 Notes: Be sure to assign the hccl_json file while running a distributed-training.
-The usage of `run_gpu.sh` is shown as bellow:
+The usage of `run_gpu.sh` is shown below:
 ```text
 Usage: run_gpu.sh [-h, --help] [-t, --task <CHAR>] [-n, --device_num <N>]

View File

@@ -54,7 +54,7 @@ def get_rouge_score(result, vocab):
             "target", "prediction" and "prediction_prob".
         Dictionary, dict instance.
-    retur:
+    return:
         Str, rouge score.
     """

View File

@@ -51,7 +51,7 @@ Dataset used: [Movie Review Data](<http://www.cs.cornell.edu/people/pabo/movie-r
 After installing MindSpore via the official website, you can start training and evaluation as follows:
-- runing on Ascend
+- running on Ascend
 ```python
 # run training example

View File

@@ -15,7 +15,7 @@
 # ============================================================================
 echo "=============================================================================================================="
-echo "Please run the scipt as: "
+echo "Please run the script as: "
 echo "bash scripts/run_distributed_gd.sh DEVICE_NUM EPOCH_SIZE RANK_TABLE_FILE"
 echo "for example: bash scripts/run_distributed_gd.sh 8 40 /path/hccl.json"
 echo "It is better to use absolute path."

View File

@@ -15,7 +15,7 @@
 # ============================================================================
 echo "=============================================================================================================="
-echo "Please run the scipt as: "
+echo "Please run the script as: "
 echo "bash run_distributed_gd_gpu.sh DEVICE_NUM EPOCH_SIZE DATA_DIR SCHEMA_DIR TEACHER_CKPT_PATH"
 echo "for example: bash run_distributed_gd_gpu.sh 8 3 /path/data/ /path/datasetSchema.json /path/bert_base.ckpt"
 echo "It is better to use absolute path."

View File

@@ -15,7 +15,7 @@
 # ============================================================================
 echo "=============================================================================================================="
-echo "Please run the scipt as: "
+echo "Please run the script as: "
 echo "bash scripts/run_standalone_gd.sh"
 echo "for example: bash scripts/run_standalone_gd.sh"
 echo "running....... please see details by log.txt"

View File

@@ -15,9 +15,9 @@
 # ============================================================================
 echo "=============================================================================================================="
-echo "Please run the scipt as: "
-echo "bash scipts/run_standalone_td.sh"
-echo "for example: bash scipts/run_standalone_td.sh"
+echo "Please run the script as: "
+echo "bash scripts/run_standalone_td.sh"
+echo "for example: bash scripts/run_standalone_td.sh"
 echo "=============================================================================================================="
 mkdir -p ms_log

View File

@@ -60,7 +60,7 @@ eval_cfg = edict({
 '''
 Including two kinds of network: \
 teacher network: The BERT-base network with finetune.
-student network: The model which is producted by GD phase.
+student network: The model which is produced by GD phase.
 '''
 td_teacher_net_cfg = BertConfig(
     seq_length=128,

View File

@@ -15,7 +15,7 @@
 # ============================================================================
 echo "=============================================================================================================="
-echo "Please run the scipt as: "
+echo "Please run the script as: "
 echo "sh process_output.sh REF_DATA EVAL_OUTPUT VOCAB_FILE"
 echo "for example: sh process_output.sh /path/newstest2014.tok.de /path/eval_output_file /path/vocab.bpe.32000"
 echo "It is better to use absolute path."

View File

@@ -15,7 +15,7 @@
 # ============================================================================
 echo "=============================================================================================================="
-echo "Please run the scipt as: "
+echo "Please run the script as: "
 echo "sh run_distribute_pretrain.sh DEVICE_NUM EPOCH_SIZE DATA_PATH RANK_TABLE_FILE"
 echo "for example: sh run_distribute_pretrain.sh 8 52 /path/ende-l128-mindrecord00 /path/hccl.json"
 echo "It is better to use absolute path."

View File

@@ -15,7 +15,7 @@
 # ============================================================================
 echo "=============================================================================================================="
-echo "Please run the scipt as: "
+echo "Please run the script as: "
 echo "sh run_distribute_pretrain_gpu.sh DEVICE_NUM EPOCH_SIZE DATA_PATH"
 echo "for example: sh run_distribute_pretrain.sh 8 55 /path/ende-l128-mindrecord00"
 echo "It is better to use absolute path."

View File

@@ -15,7 +15,7 @@
 # ============================================================================
 echo "=============================================================================================================="
-echo "Please run the scipt as: "
+echo "Please run the script as: "
 echo "sh run_eval.sh DEVICE_TARGET DEVICE_ID"
 echo "for example: sh run_eval.sh Ascend 0"
 echo "Note: set the checkpoint and dataset path in src/eval_config.py"

View File

@@ -49,7 +49,7 @@ The FM and deep component share the same input raw feature vector, which enables
 After installing MindSpore via the official website, you can start training and evaluation as follows:
-- runing on Ascend
+- running on Ascend
 ```shell
 # run training example

View File

@@ -168,7 +168,7 @@ optional arguments:
                         is a slice of weight, multiple checkpoint files need to be
                         transferred. Use ';' to separate them and sort them in sequence
                         like "./checkpoints/0.ckpt;./checkpoints/1.ckpt".
-                        (Defalut:./checkpoints/)
+                        (Default:./checkpoints/)
   --eval_file_name      Eval output file.(Default:eval.og)
   --loss_file_name      Loss output file.(Default:loss.log)
   --host_device_mix     Enable host device mode or not.(Default:0)
@@ -326,7 +326,7 @@ python eval.py
 | AUC Score | 0.80937 | 0.80971 | 0.80862 | 0.80834 |
 | Speed | 20.906 ms/step | 24.465 ms/step | 27.388 ms/step | 236.506 ms/step |
 | Loss | wide:0.433,deep:0.444 | wide:0.444, deep:0.456 | wide:0.437, deep: 0.448 | wide:0.444, deep:0.444 |
-| Parms(M) | 75.84 | 75.84 | 75.84 | 75.84 |
+| Params(M) | 75.84 | 75.84 | 75.84 | 75.84 |
 | Checkpoint for inference | 233MB(.ckpt file) | 230MB(.ckpt) | 233MB(.ckpt file) | 233MB(.ckpt file) |
 All executable scripts can be found in [here](https://gitee.com/mindspore/mindspore/tree/master/model_zoo/official/recommend/wide_and_deep/script)

View File

@@ -170,7 +170,7 @@ optional arguments:
                         is a slice of weight, multiple checkpoint files need to be
                         transferred. Use ';' to separate them and sort them in sequence
                         like "./checkpoints/0.ckpt;./checkpoints/1.ckpt".
-                        (Defalut:./checkpoints/)
+                        (Default:./checkpoints/)
   --eval_file_name      Eval output file.(Default:eval.og)
   --loss_file_name      Loss output file.(Default:loss.log)
   --host_device_mix     Enable host device mode or not.(Default:0)

View File

@@ -292,7 +292,7 @@ def _get_vocab_size(target_column_number, worker_size, total_vocab_size, multipl
     index_offsets = [0]
-    # The gold feature numbers ared used to caculate the offset
+    # The gold feature numbers are used to calculate the offset
     features = [item for item in new_vocab_size]
     # According to the per_vocab_size, maxize the vocab size
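One plausible reading of the corrected comment, sketched for clarity; the real `_get_vocab_size` logic is elided in this hunk, so the numbers and loop below are assumptions:

```python
# Assumed illustration: per-feature vocabulary counts are accumulated
# into index offsets, as the fixed comment suggests.
new_vocab_size = [1000, 500, 2000]  # made-up feature counts
index_offsets = [0]
for count in new_vocab_size[:-1]:
    index_offsets.append(index_offsets[-1] + count)
print(index_offsets)  # [0, 1000, 1500]
```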

View File

@@ -133,5 +133,5 @@ if __name__ == "__main__":
     auc_val = validation(network, cfg.checkpoint_path + "/" + cfg.model_name, cfg.data_dir,
                          cfg.val_filename, cfg.num_consumer, cfg.batch_size)
-    print("=" * 10 + "Validation Peformance" + "=" * 10)
+    print("=" * 10 + "Validation Performance" + "=" * 10)
     print("AUC: {:.5f}".format(auc_val))

View File

@@ -48,7 +48,7 @@ if __name__ == '__main__':
         context.set_context(mode=context.GRAPH_MODE,
                             device_target="GPU", save_graphs=False)
     else:
-        raise ValueError("Unsupport platform.")
+        raise ValueError("Unsupported platform.")
     loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')

View File

@@ -55,7 +55,7 @@ def create_dataset(dataset_path, do_train, config, platform, repeat_num=1, batch
         data_set = ds.MindDataset(
             dataset_path, num_parallel_workers=8, shuffle=True)
     else:
-        raise ValueError("Unsupport platform.")
+        raise ValueError("Unsupported platform.")
     resize_height = config.image_height
     buffer_size = 1000

View File

@@ -34,7 +34,7 @@ def parse_args():
     >>> parse_args()
     """
     parser = ArgumentParser(description="mindspore distributed training launch "
-                                        "helper utilty that will spawn up "
+                                        "helper utility that will spawn up "
                                         "multiple distributed processes")
     parser.add_argument("--nproc_per_node", type=int, default=1,
                         help="The number of processes to launch on each node, "

View File

@@ -47,7 +47,7 @@ if __name__ == '__main__':
         context.set_context(mode=context.GRAPH_MODE,
                             device_target="GPU", save_graphs=False)
     else:
-        raise ValueError("Unsupport platform.")
+        raise ValueError("Unsupported platform.")
     loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')

View File

@@ -55,7 +55,7 @@ def create_dataset(dataset_path, do_train, config, platform, repeat_num=1, batch
         data_set = ds.MindDataset(
             dataset_path, num_parallel_workers=8, shuffle=True)
     else:
-        raise ValueError("Unsupport platform.")
+        raise ValueError("Unsupported platform.")
     resize_height = config.image_height
     buffer_size = 1000

View File

@@ -34,7 +34,7 @@ def parse_args():
     >>> parse_args()
     """
    parser = ArgumentParser(description="mindspore distributed training launch "
-                                        "helper utilty that will spawn up "
+                                        "helper utility that will spawn up "
                                         "multiple distributed processes")
     parser.add_argument("--nproc_per_node", type=int, default=1,
                         help="The number of processes to launch on each node, "

View File

@@ -49,7 +49,7 @@ if __name__ == '__main__':
         context.set_context(mode=context.GRAPH_MODE,
                             device_target="GPU", save_graphs=False)
     else:
-        raise ValueError("Unsupport platform.")
+        raise ValueError("Unsupported platform.")
     loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')

View File

@@ -56,7 +56,7 @@ def create_dataset(dataset_path, do_train, config, platform, repeat_num=1, batch
         data_set = ds.MindDataset(
             dataset_path, num_parallel_workers=8, shuffle=False)
     else:
-        raise ValueError("Unsupport platform.")
+        raise ValueError("Unsupported platform.")
     resize_height = config.image_height
     buffer_size = 1000

View File

@@ -74,7 +74,7 @@ For FP16 operators, if the input data type is FP32, the backend of MindSpore wil
 After installing MindSpore via the official website, you can start training and evaluation as follows:
-- runing on Ascend
+- running on Ascend
 ```bash
 # distributed training

View File

@@ -59,7 +59,7 @@ Dataset used: [COCO2017](<http://images.cocodataset.org/>)
 ```
 2. If your own dataset is used. **Select dataset to other when run script.**
-    Organize the dataset infomation into a TXT file, each row in the file is as follows:
+    Organize the dataset information into a TXT file, each row in the file is as follows:
 ```python
 train2017/0000001.jpg 0,259,401,459,7 35,28,324,201,2 0,30,59,80,2

View File

@@ -15,7 +15,7 @@
 # ============================================================================
 echo "=============================================================================================================="
-echo "Please run the scipt as: "
+echo "Please run the script as: "
 echo "sh run_distribute_train_ghostnet.sh DEVICE_NUM EPOCH_SIZE LR DATASET RANK_TABLE_FILE PRE_TRAINED PRE_TRAINED_EPOCH_SIZE"
 echo "for example: sh run_distribute_train_ghostnet.sh 8 500 0.2 coco /data/hccl.json /opt/ssd-300.ckpt(optional) 200(optional)"
 echo "It is better to use absolute path."
@@ -33,7 +33,7 @@ BASE_PATH=$(cd "`dirname $0`" || exit; pwd)
 cd $BASE_PATH/../ || exit
 python train.py --only_create_dataset=True
-echo "After running the scipt, the network runs in the background. The log will be generated in LOGx/log.txt"
+echo "After running the script, the network runs in the background. The log will be generated in LOGx/log.txt"
 export RANK_SIZE=$1
 EPOCH_SIZE=$2
EPOCH_SIZE=$2

View File

@@ -50,7 +50,7 @@ def split_imgs_and_labels(imgs, labels, batchInfo):
 def create_dataset(batch_size, train_data_url='', workers=8, distributed=False,
                    input_size=224, color_jitter=0.4):
-    """Creat ImageNet training dataset"""
+    """Create ImageNet training dataset"""
     if not os.path.exists(train_data_url):
         raise ValueError('Path not exists')
     decode_op = py_vision.Decode()
@@ -102,7 +102,7 @@ def create_dataset(batch_size, train_data_url='', workers=8, distributed=False,
 def create_dataset_val(batch_size=128, val_data_url='', workers=8, distributed=False,
                        input_size=224):
-    """Creat ImageNet validation dataset"""
+    """Create ImageNet validation dataset"""
     if not os.path.exists(val_data_url):
         raise ValueError('Path not exists')
     rank_id = get_rank() if distributed else 0

View File

@@ -15,7 +15,7 @@
 ## Description
-Generalized Operator Modelling of the Ocean (GOMO) is a three-dimensional ocean model based on OpenArray which is a simple operator library for the decoupling of ocean modelling and parallel computing (Xiaomeng Huang et al, 2019). GOMO is a numerical solution model using finite differential algorithm to solve PDE equations. With MindSpore and GPU, we can achieve great improvments in solving those PDE equations compared with CPU.
+Generalized Operator Modelling of the Ocean (GOMO) is a three-dimensional ocean model based on OpenArray which is a simple operator library for the decoupling of ocean modelling and parallel computing (Xiaomeng Huang et al, 2019). GOMO is a numerical solution model using finite differential algorithm to solve PDE equations. With MindSpore and GPU, we can achieve great improvements in solving those PDE equations compared with CPU.
 This is an example of training GOMO Model with MindSpore on GPU.
 ## Model Architecture
## Model Architecture

View File

@@ -668,7 +668,7 @@ class GOMO(nn.Cell):
             el: the surface elevation as used in the external mode (m).
         Returns:
-            tuple[Tensor], update varibles of external mode
+            tuple[Tensor], update variables of external mode
         """
         adx2d = self.reduce_sum(advx * self.dz, 2)
         ady2d = self.reduce_sum(advy * self.dz, 2)
@@ -836,7 +836,7 @@ class GOMO(nn.Cell):
             utf, vtf: ua, va time averaged over the interval, DT = dti(ms-1)
         Returns:
-            tuple[Tensor], update varibles of external mode
+            tuple[Tensor], update variables of external mode
         """
         vamax = P.ReduceMax()(P.Abs()(vaf))
         if iext == (self.isplit - 2):

View File

@@ -204,8 +204,8 @@ Parameters for both training and evaluation can be set in config.py.
 for shell script:
 ```python
-# sh srcipts/run_train_ascend.sh [device_id]
-sh srcipts/run_train_ascend.sh 0
+# sh scripts/run_train_ascend.sh [device_id]
+sh scripts/run_train_ascend.sh 0
 ```
 for python script:

View File

@@ -21,7 +21,7 @@ Locally Differentially Private (LDP) LinUCB is a variant of LinUCB bandit algori
 # [Model Architecture](#contents)
-The server interacts with users in rounds. For a coming user, the server first transfers the current model parameters to the user. In the user side, the model chooses an action based on the user feature to play (e.g., choose a movie to recommend), and observes a reward (or loss) value from the user (e.g., rating of the movie). Then we perturb the data to be transfered by adding Gaussian noise. Finally, the server receives the perturbed data and updates the model. Details can be found in the [original paper](https://arxiv.org/abs/2006.00701).
+The server interacts with users in rounds. For a coming user, the server first transfers the current model parameters to the user. In the user side, the model chooses an action based on the user feature to play (e.g., choose a movie to recommend), and observes a reward (or loss) value from the user (e.g., rating of the movie). Then we perturb the data to be transferred by adding Gaussian noise. Finally, the server receives the perturbed data and updates the model. Details can be found in the [original paper](https://arxiv.org/abs/2006.00701).
 # [Dataset](#contents)
@@ -54,7 +54,7 @@ Dataset used: [MovieLens 100K](https://grouplens.org/datasets/movielens/100k/)
 ├── ldp_linucb
     ├── README.md // descriptions about LDP LinUCB
     ├── scripts
-    │   ├── run_train_eval.sh // shell script for runing on Ascend
+    │   ├── run_train_eval.sh // shell script for running on Ascend
     ├── src
     │   ├── dataset.py // dataset for movielens
     │   ├── linucb.py // model
@@ -124,7 +124,7 @@ The performance compared with optimal non-private regret O(sqrt(T)):
 # [Description of Random Situation](#contents)
-In `train_eval.py`, we randomly sample a user at each round. We also add Gaussian noise to the date being transfered.
+In `train_eval.py`, we randomly sample a user at each round. We also add Gaussian noise to the data being transferred.
 # [ModelZoo Homepage](#contents)
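The perturbation step both corrected sentences describe, as a minimal numpy sketch; `sigma` and the update shape are illustrative, not values from the repo:

```python
# Assumed illustration: perturb the data to be transferred by adding
# Gaussian noise on the user side before it is sent to the server.
import numpy as np

rng = np.random.default_rng(0)

def perturb(update, sigma=0.1):
    return update + rng.normal(scale=sigma, size=update.shape)

payload = perturb(np.ones(4))  # the payload, not the raw update, is sent
print(payload)
```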

View File

@@ -34,7 +34,7 @@ def parse_args():
     >>> parse_args()
     """
     parser = ArgumentParser(description="mindspore distributed training launch "
-                            "helper utilty that will generate hccl"
+                            "helper utility that will generate hccl"
                             " config file")
     parser.add_argument("--device_num", type=str, default="[0,8)",
                         help="The number of the Ascend accelerators used. please note that the Ascend accelerators"