diff --git a/mindconverter/.gitignore b/mindconverter/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..f971976a1aa9ecf51865220c28e54bcfc871ad6d --- /dev/null +++ b/mindconverter/.gitignore @@ -0,0 +1,143 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +output/ +repdir/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +#mytest +test.ipynb + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ diff --git a/mindconverter/LICENSE b/mindconverter/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/mindconverter/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/mindconverter/MANIFEST.in b/mindconverter/MANIFEST.in new file mode 100644 index 0000000000000000000000000000000000000000..c0e8b4d9f0bc2fda07f6ad8f479aded283f78353 --- /dev/null +++ b/mindconverter/MANIFEST.in @@ -0,0 +1,5 @@ +recursive-include mindconverter * +recursive-exclude * .git +recursive-exclude * .gitignore +recursive-exclude * __pycache__ +recursive-exclude * *.py[co] *.swp \ No newline at end of file diff --git a/mindconverter/README.md b/mindconverter/README.md new file mode 100644 index 0000000000000000000000000000000000000000..91ae7acab87a32f958f639ff7c794b4922446142 --- /dev/null +++ b/mindconverter/README.md @@ -0,0 +1,198 @@ +# Mindconverter 文档 + +> 当前版本:BETA + +View English + +## 介绍 + +MindConverter是一款用于将PyTorch脚本转换到MindSpore脚本的工具。结合转换报告的信息,用户只需对转换后的脚本进行微小的改动,即可快速将PyTorch框架的模型迁移到MindSpore。 + +## 安装 + +简易安装: + +运行 `python setup.py install`即可完成安装 + +## 使用方法(命令行使用) + +``` +mindconverter [-h] [--version] --in_file IN_FILE [--output OUTPUT] + [--report REPORT] + +可选参数列表: + -h, --help 显示此帮助消息并退出 + --version 显示程序版本号并退出 + --in_file IN_FILE 指定源代码文件的路径 + --output OUTPUT 指定转换后的代码的存储路径,默认情况输出位于当前的工作目录 + --report REPORT 指定转换日志的存储路径,默认情况输出位于当前的工作目录 +``` + +请注意:由于mindspore和pytorch架构存在的一些差异,本工具并不能完美进行转换,相关的信息以输出报告的形式存储下载,请您在转换完毕后比对输出日志进行调整 + +### 不支持转换的类型 + +- `Dtype`参数,由于mindspore和pytorch的数据类型不同,该参数无法被转换,需要您根据代码进行调整; +- 
参数类型为`Tuple`,`Dict`的算子,由于某些算子,mindspore暂不支持以元组形式传参,故此暂不支持对该类型的算子进行转换,在输出报告中会给出相应的解决方案和对应的文档链接,需要您进行比对然后手动进行修改; + +## 使用样例 + +下面主要是对于两类使用场景下进行样例解析: + +1. 对于简单模型「Mindconverter 完全支持的算子类型」 + + 对于Pytorch实现的`lenet.py`模型如下: + +```python +import torch.nn as nn +import torch.nn.functional as F + + +class TestLeNet(nn.Module): + """TestLeNet network.""" + def __init__(self): + self.conv1 = nn.Conv2d(3, 6, 5) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, input_x): + """Callback method.""" + out = F.relu(self.conv1(input_x)) + out = F.max_pool2d(out, 2) + out = F.relu(self.conv2(out)) + out = F.max_pool2d(out, 2) + out = out.view(out.size(0), -1) + out = F.relu(self.fc1(out)) + out = F.relu(self.fc2(out)) + out = self.fc3(out) + return out +``` + + - 安装Mindconverter后,使用命令行指令 `mindconverter --in_file lenet.py`,如果未制定报告输出目录`–output`及模型输出目录`–report`,将在当前生成`output` 文件夹,存放转换后的模型代码,同时在当前目录下生成`MODEL_report`报告文件,报告文件格式: + +``` +[开始转换] +[插入] 'import mindspore.ops' 已经插入到被转换为文件中 +[插入] 'import mindspore.experimental.optim as optim' 已经插入到被转换为文件中 +行 1:列 0: [已转换] 'import torch.nn as nn' 已经被转换为了 'import mindspore.nn as nn'. +行 2:列 0: [已转换] 'import torch.nn.functional as F' 已经被转换为了 'import mindspore.ops as ops'. +行 5:列 16: [已转换] 'nn.Module' 已经被转换为了 'nn.Cell'. +行 8:列 21: [已转换] 'nn.Conv2d' 已经被转换为了 'nn.Conv2d'. 参数已转换,本算子MindSpore与Pytorch存在一些差异,参考资料: MindSpore:与PyTorch实现的功能基本一致,但存在偏置差异和填充差异。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/Conv2d.html +行 9:列 21: [已转换] 'nn.Conv2d' 已经被转换为了 'nn.Conv2d'. 参数已转换,本算子MindSpore与Pytorch存在一些差异,参考资料: MindSpore:与PyTorch实现的功能基本一致,但存在偏置差异和填充差异。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/Conv2d.html +行 10:列 19: [已转换] 'nn.Linear' 已经被转换为了 'nn.Dense'. 
MindSpore:MindSpore此API实现功能与PyTorch基本一致,而且可以在全连接层后添加激活函数。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/Dense.html +行 11:列 19: [已转换] 'nn.Linear' 已经被转换为了 'nn.Dense'. MindSpore:MindSpore此API实现功能与PyTorch基本一致,而且可以在全连接层后添加激活函数。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/Dense.html +行 12:列 19: [已转换] 'nn.Linear' 已经被转换为了 'nn.Dense'. MindSpore:MindSpore此API实现功能与PyTorch基本一致,而且可以在全连接层后添加激活函数。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/Dense.html +行 14:列 4: [已转换] 'forward' 已经被转换为了 'construct'. +行 16:列 14: [已转换] 'F.relu' 已经被转换为了 'ops.relu'. +行 17:列 14: [已转换] 'F.max_pool2d' 已经被转换为了 'ops.MaxPool'. +行 18:列 14: [已转换] 'F.relu' 已经被转换为了 'ops.relu'. +行 19:列 14: [已转换] 'F.max_pool2d' 已经被转换为了 'ops.MaxPool'. +行 20:列 23: [已转换] 'out.size' 已经被转换为了 'out.shape'. +行 21:列 14: [已转换] 'F.relu' 已经被转换为了 'ops.relu'. +行 22:列 14: [已转换] 'F.relu' 已经被转换为了 'ops.relu'. +[转换完毕] +``` + +报告文件将给出识别到的每个pytorch算子的转换方式,由于Mindspore和Pytorch框架存在诸多差异,建议用户参考报告内容,对转换后的模型代码再进行适当的微调。 + +2. 对于不支持模型的转换,需要根据报告文件进行微调 + +例如在`covLSTM.py`模型中,mindconverter对于`torch.zeros`的元组传入参数不支持, + +```python +import torch.nn as nn +import torch + +... +class ConvLSTMCell(nn.Module): + + def __init__(self, input_dim, hidden_dim, kernel_size, bias): + ... + + def forward(self, input_tensor, cur_state): + h_cur, c_cur = cur_state + + combined = torch.cat([input_tensor, h_cur], dim=1) + + combined_conv = self.conv(combined) + cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1) + i = torch.sigmoid(cc_i) + f = torch.sigmoid(cc_f) + o = torch.sigmoid(cc_o) + g = torch.tanh(cc_g) + + c_next = f * c_cur + i * g + h_next = o * torch.tanh(c_next) + + return h_next, c_next + + def init_hidden(self, batch_size, image_size): + height, width = image_size + return (torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device), + torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device)) + +...... 
+``` + +在输出报告中对应代码位置将显示 + +``` +行 48:列 12: [已转换] 'torch.tanh' 已经被转换为了 'ops.tanh'. +行 51:列 21: [已转换] 'torch.tanh' 已经被转换为了 'ops.tanh'. +行 57:列 16: [未转换] 'torch.zeros' 没有进行转换,建议您自行进行参数转换 +行 58:列 16: [未转换] 'torch.zeros' 没有进行转换,建议您自行进行参数转换 +行 61:列 15: [已转换] 'nn.Module' 已经被转换为了 'nn.Cell'. +``` + +而在输出代码结果中当前代码并没有被转换并且会进行报错 + +```python +import mindspore.nn as nn +import mindspore.experimental.optim as optim +import mindspore.ops +import mindspore.ops as ops + + +class ConvLSTMCell(nn.Cell): + + def __init__(self, input_dim, hidden_dim, kernel_size, bias): + ... + def construct(self, input_tensor, cur_state): + h_cur, c_cur = cur_state + + combined = ops.cat(tensors=[input_tensor, h_cur], axis=1) + + combined_conv = self.conv(combined) + cc_i, cc_f, cc_o, cc_g = ops.split(tensor=combined_conv, split_size_or_sections=self.hidden_dim, axis=1) + i = ops.sigmoid(input=cc_i) + f = ops.sigmoid(input=cc_f) + o = ops.sigmoid(input=cc_o) + g = ops.tanh(input=cc_g) + + c_next = f * c_cur + i * g + h_next = o * ops.tanh(input=c_next) + + return h_next, c_next + + def init_hidden(self, batch_size, image_size): + height, width = image_size + return (torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device), #未进行转换 + torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device)) #未进行转换 + +...... 
+``` + +需要用户手动进行微调,例如: + +```python +torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device) + +ops.zeros((batch_size, self.hidden_dim, height, width), dtype=None) +``` + + + + + diff --git a/mindconverter/README_CN.md b/mindconverter/README_CN.md new file mode 100644 index 0000000000000000000000000000000000000000..03f38cbaba900fe631690410995c8a2056e257e0 --- /dev/null +++ b/mindconverter/README_CN.md @@ -0,0 +1,199 @@ +# Mindconverter 文档 + +> 当前版本:BETA + +View English + +## 介绍 + +MindConverter是一款用于将PyTorch脚本转换到MindSpore脚本的工具。结合转换报告的信息,用户只需对转换后的脚本进行微小的改动,即可快速将PyTorch框架的模型迁移到MindSpore。 + +当前提供转换的范围是`torch`、`torch.nn`、`torch.nn.functional`、`torch.Tensor`、`torch.optim` + +## 安装 + +简易安装: + +运行 `python setup.py install`即可完成安装 + +## 使用方法(命令行使用) + +``` +mindconverter [-h] [--version] --in_file IN_FILE [--output OUTPUT] + [--report REPORT] + +可选参数列表: + -h, --help 显示此帮助消息并退出 + --version 显示程序版本号并退出 + --in_file IN_FILE 指定源代码文件的路径 + --output OUTPUT 指定转换后的代码的存储路径,默认情况输出位于当前的工作目录 + --report REPORT 指定转换日志的存储路径,默认情况输出位于当前的工作目录 +``` + +请注意:由于mindspore和pytorch架构存在的一些差异,本工具并不能完美进行转换,相关的信息以输出报告的形式存储下载,请您在转换完毕后比对输出日志进行调整 + +### 不支持转换的类型 + +- `Dtype`参数,由于mindspore和pytorch的数据类型不同,该参数无法被转换,需要您根据代码进行调整; +- 参数类型为`Tuple`,`Dict`的算子,由于某些算子,mindspore暂不支持以元组形式传参,故此暂不支持对该类型的算子进行转换,在输出报告中会给出相应的解决方案和对应的文档链接,需要您进行比对然后手动进行修改; +- 模块是继承torch.nn.Modules()的类拥有的方法,无法进行转换,需要用户对模型进行调整; + +## 使用样例 + +下面主要是对于两类使用场景下进行样例解析: + +1. 
对于简单模型「Mindconverter 完全支持的算子类型」 + + 对于Pytorch实现的`lenet.py`模型如下: + +```python +import torch.nn as nn +import torch.nn.functional as F + + +class TestLeNet(nn.Module): + """TestLeNet network.""" + def __init__(self): + self.conv1 = nn.Conv2d(3, 6, 5) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, input_x): + """Callback method.""" + out = F.relu(self.conv1(input_x)) + out = F.max_pool2d(out, 2) + out = F.relu(self.conv2(out)) + out = F.max_pool2d(out, 2) + out = out.view(out.size(0), -1) + out = F.relu(self.fc1(out)) + out = F.relu(self.fc2(out)) + out = self.fc3(out) + return out +``` + + - 安装Mindconverter后,使用命令行指令 `mindconverter --in_file lenet.py`,如果未制定报告输出目录`–output`及模型输出目录`–report`,将在当前生成`output` 文件夹,存放转换后的模型代码,同时在当前目录下生成`MODEL_report`报告文件,报告文件格式: + +``` +[开始转换] +[插入] 'import mindspore.ops' 已经插入到被转换为文件中 +[插入] 'import mindspore.experimental.optim as optim' 已经插入到被转换为文件中 +行 1:列 0: [已转换] 'import torch.nn as nn' 已经被转换为了 'import mindspore.nn as nn'. +行 2:列 0: [已转换] 'import torch.nn.functional as F' 已经被转换为了 'import mindspore.ops as ops'. +行 5:列 16: [已转换] 'nn.Module' 已经被转换为了 'nn.Cell'. +行 8:列 21: [已转换] 'nn.Conv2d' 已经被转换为了 'nn.Conv2d'. 参数已转换,本算子MindSpore与Pytorch存在一些差异,参考资料: MindSpore:与PyTorch实现的功能基本一致,但存在偏置差异和填充差异。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/Conv2d.html +行 9:列 21: [已转换] 'nn.Conv2d' 已经被转换为了 'nn.Conv2d'. 参数已转换,本算子MindSpore与Pytorch存在一些差异,参考资料: MindSpore:与PyTorch实现的功能基本一致,但存在偏置差异和填充差异。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/Conv2d.html +行 10:列 19: [已转换] 'nn.Linear' 已经被转换为了 'nn.Dense'. MindSpore:MindSpore此API实现功能与PyTorch基本一致,而且可以在全连接层后添加激活函数。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/Dense.html +行 11:列 19: [已转换] 'nn.Linear' 已经被转换为了 'nn.Dense'. 
MindSpore:MindSpore此API实现功能与PyTorch基本一致,而且可以在全连接层后添加激活函数。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/Dense.html +行 12:列 19: [已转换] 'nn.Linear' 已经被转换为了 'nn.Dense'. MindSpore:MindSpore此API实现功能与PyTorch基本一致,而且可以在全连接层后添加激活函数。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/Dense.html +行 14:列 4: [已转换] 'forward' 已经被转换为了 'construct'. +行 16:列 14: [已转换] 'F.relu' 已经被转换为了 'ops.relu'. +行 17:列 14: [已转换] 'F.max_pool2d' 已经被转换为了 'ops.MaxPool'. +行 18:列 14: [已转换] 'F.relu' 已经被转换为了 'ops.relu'. +行 19:列 14: [已转换] 'F.max_pool2d' 已经被转换为了 'ops.MaxPool'. +行 20:列 23: [已转换] 'out.size' 已经被转换为了 'out.shape'. +行 21:列 14: [已转换] 'F.relu' 已经被转换为了 'ops.relu'. +行 22:列 14: [已转换] 'F.relu' 已经被转换为了 'ops.relu'. +[转换完毕] +``` + +报告文件将给出识别到的每个pytorch算子的转换方式,由于Mindspore和Pytorch框架存在诸多差异,建议用户参考报告内容,对转换后的模型代码再进行适当的微调。 + +2. 对于不支持模型的转换,需要根据报告文件进行微调 + +例如在`covLSTM.py`模型中,mindconverter对于`torch.zeros`的元组传入参数不支持, + +```python +import torch.nn as nn +import torch + +... +class ConvLSTMCell(nn.Module): + + def __init__(self, input_dim, hidden_dim, kernel_size, bias): + ... + + def forward(self, input_tensor, cur_state): + h_cur, c_cur = cur_state + + combined = torch.cat([input_tensor, h_cur], dim=1) + + combined_conv = self.conv(combined) + cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1) + i = torch.sigmoid(cc_i) + f = torch.sigmoid(cc_f) + o = torch.sigmoid(cc_o) + g = torch.tanh(cc_g) + + c_next = f * c_cur + i * g + h_next = o * torch.tanh(c_next) + + return h_next, c_next + + def init_hidden(self, batch_size, image_size): + height, width = image_size + return (torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device), + torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device)) + +...... +``` + +在输出报告中对应代码位置将显示 + +``` +行 48:列 12: [已转换] 'torch.tanh' 已经被转换为了 'ops.tanh'. +行 51:列 21: [已转换] 'torch.tanh' 已经被转换为了 'ops.tanh'. 
+行 57:列 16: [未转换] 'torch.zeros' 没有进行转换,建议您自行进行参数转换 +行 58:列 16: [未转换] 'torch.zeros' 没有进行转换,建议您自行进行参数转换 +行 61:列 15: [已转换] 'nn.Module' 已经被转换为了 'nn.Cell'. +``` + +而在输出代码结果中当前代码并没有被转换并且会进行报错 + +```python +import mindspore.nn as nn +import mindspore.experimental.optim as optim +import mindspore.ops +import mindspore.ops as ops + + +class ConvLSTMCell(nn.Cell): + + def __init__(self, input_dim, hidden_dim, kernel_size, bias): + ... + def construct(self, input_tensor, cur_state): + h_cur, c_cur = cur_state + + combined = ops.cat(tensors=[input_tensor, h_cur], axis=1) + + combined_conv = self.conv(combined) + cc_i, cc_f, cc_o, cc_g = ops.split(tensor=combined_conv, split_size_or_sections=self.hidden_dim, axis=1) + i = ops.sigmoid(input=cc_i) + f = ops.sigmoid(input=cc_f) + o = ops.sigmoid(input=cc_o) + g = ops.tanh(input=cc_g) + + c_next = f * c_cur + i * g + h_next = o * ops.tanh(input=c_next) + + return h_next, c_next + + def init_hidden(self, batch_size, image_size): + height, width = image_size + return (torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device), #未进行转换 + torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device)) #未进行转换 + +...... 
+``` + +需要用户手动进行微调,例如: + +```python +torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device) + +ops.zeros((batch_size, self.hidden_dim, height, width), dtype=None) +``` + + + diff --git a/mindconverter/docs/developer.md b/mindconverter/docs/developer.md new file mode 100644 index 0000000000000000000000000000000000000000..97ab10cbc964d46baa290f0bc0212a80f337cc5d --- /dev/null +++ b/mindconverter/docs/developer.md @@ -0,0 +1,83 @@ +## 代码结构 + +``` +--\ +--common\ 包含通用设置,日志和抛出异常 +----__init__.py 初始化common +----exceptions.py 异常处理代码 +----log.py 日志处理代码 +--conf\ 配置文件 +----__init__.py 初始化conf +----constants.py +----defaults.py +--mappings\ pytorch to mindspore映射代码 +----f_mappings.json torch.functional映射 +----nn_mappongs.json torch.nn映射 +----tensor_dot_mappings.json torch.tensor映射 +----torch_dot_mappings.json torch映射 +----torch_optim_mappings.json torch.optim映射 +--ops\ +----f_list.json 包含支持的torch.functional的算子 +----nn_list.json 包含支持的torch.nn的算子 +----tensor_dot_list.jsonn 包含支持的torch.tensor的算子 +----torch_dot_list.json 包含支持的torch的算子 +----torch_optim_list.json 包含支持的torch.optim算子 +--\ +--__init__.py 初始化mindconvert +--_version.py 版本控制 +--ast_edit.py AST语法树的节点编辑 +--cli.py 程序处理入口 +--code_analysis.py 代码结构分析 +--config.py 完成对于mapping映射的相关控制和抛出警告 +--converter.py 运行主程序 +--forward_call.py 在脚本文件中找到forward函数调用的子函数。 +--funcs.py 为某些算子生成显式映射表 +--map_api_download.py 完成对于算子映射表的下载和参数的转换 +--warn_infos\ +----supported_warn_infos.json 支持算子但存在一些差异的算子的相关警告信息 +----unsupported_warn_infos.json mindspore不支持的算子的输出警告信息 +``` + + + +## 维护相关 + +1. 运行map_api_download.py程序将自动下载Mindspore和Pytorch最新版本的算子参数和名称,同时以Json格式存储到mappings文件夹和ops文件夹下 + + - ops文件夹存储的是对于算子的名称 + - mappings文件夹存储的对应算子的参数以及pytorch和mindspore参数的映射 + +2. 
注意下载完后,由于文档的差异性/文档格式不标准的问题,仍需手动进行调整,运行`map_api_download.py`时,存在问题的算子会在控制台进行输出,下面是已知存在问题的一下算子 + + - ``` + torch.nn.CeLU + mindspore.nn.Hsigmoid + mindspore.nn.Hswish + mindspore.nn.Identity + mindspore.ops.uniform + mindspore.nn.LogSigmoid + mindspore.nn.LogSoftMax + mindspore.nn.MultiLabelMarginLoss + mindspore.nn.ReLU + mindspore.nn.ReLU6 + mindspore.nn.SeLU + torch.nn.SeLU + mindspore.nn.Sigmoid + mindspore.nn.SiLU + mindspore.nn.Softmax2d + mindspore.nn.Softsign + mindspore.nn.Tanh + mindspore.nn.Tanhshrink + mindspore.ops.clip_by_value + mindspore.ops.clip_by_norm + torch.optim.AdaMax + torch.optim.Optimizer + torch.optim.RMSProp + torch.optim.lr_scheduler.LRScheduler + ``` + +3. 应该确保仓库的mappings文件夹和ops文件夹下的映射关系是最新的 + +4. `warn_infos`文件夹下存放算子的注意信息以及对应文档的链接 + +5. 如果转换过程中遇到问题,只需要找到出问题的算子(控制台会给出),然后在mappings文件夹下找到相应的算子,对算子的参数和映射关系进行调试修改即可 \ No newline at end of file diff --git a/mindconverter/mindconverter/__init__.py b/mindconverter/mindconverter/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cae93230a8eb4b210c51c48ab1eb272a5cde9678 --- /dev/null +++ b/mindconverter/mindconverter/__init__.py @@ -0,0 +1,27 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ + +from mindconverter._version import VERSION + +__version__ = VERSION +__version_info__ = tuple(VERSION.split('.')) + +from mindconverter.cli import cli_entry + +__all__ = [ + 'pytorch2mindspore', + '__version__', + '__version_info__' +] \ No newline at end of file diff --git a/mindconverter/mindconverter/__main__.py b/mindconverter/mindconverter/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..07f72063c93593252fc1213f797cbf55fd996c01 --- /dev/null +++ b/mindconverter/mindconverter/__main__.py @@ -0,0 +1,19 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""MindConverter main module.""" + +from mindconverter.cli import cli_entry + +cli_entry() \ No newline at end of file diff --git a/mindconverter/mindconverter/_version.py b/mindconverter/mindconverter/_version.py new file mode 100644 index 0000000000000000000000000000000000000000..ce546733a763f0375c74f6b1b677db7e016443e7 --- /dev/null +++ b/mindconverter/mindconverter/_version.py @@ -0,0 +1,17 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""MindConverter version module.""" + +VERSION = '1.0.0' \ No newline at end of file diff --git a/mindconverter/mindconverter/ast_edits.py b/mindconverter/mindconverter/ast_edits.py new file mode 100644 index 0000000000000000000000000000000000000000..00cd51f70f1818a206290eaf9c6333d855e43f40 --- /dev/null +++ b/mindconverter/mindconverter/ast_edits.py @@ -0,0 +1,884 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless REQUIRED by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""Convert for Python scripts according API mapping information.""" + +import ast +import logging +import re +from enum import Enum + +import pasta +from pasta.base import formatting as fmt + +from mindconverter.code_analysis import CodeAnalyzer +from mindconverter.code_analysis import APIAnalysisSpec +from mindconverter.config import ALL_MAPPING +from mindconverter.config import NN_LIST +from mindconverter.config import ALL_TORCH_APIS +from mindconverter.config import ALL_2P_LIST +from mindconverter.config import TENSOR_DOT_LIST +from mindconverter.config import get_prompt_info +from mindconverter.common.log import logger +from mindconverter.common.exceptions import NodeTypeNotSupport +from mindconverter.forward_call import ForwardCall + +LOG_FMT_INSERT = "[插入] '%s' 已经插入到被转换为文件中" +LOG_FMT_CONVERT = "[已转换] '%s' 已经被转换为了 '%s'." +LOG_FMT_CONVERT_WITH_TIPS = "[已转换] '%s' 已经被转换为了 '%s'. %s" +LOG_FMT_NOT_CONVERT = "[未转换] '%s' 没有进行转换,建议您自行进行参数转换 %s" +LOG_FMT_PROMPT_INFO = "[信息] %s" +LOG_SUGGESTION_MANUAL_CONVERT = "请注意手动转换代码以及与其关联的代码。" + + +class ApiMatchingEnum(Enum): + """Node edge type enum.""" + NOT_API = 'not an api name' + API_INFER = 'infer api name to map' + API_STANDARD = 'api name in the correct format' + API_FOUND = 'found an api name in api list' + API_MATCHED = 'api is matched to map' + + +class _ConvertReport: + """Report log of converting source code.""" + + def __init__(self, is_stub=False): + self._is_stub = is_stub + self._max_line = 0 + self._log_head = [] + self._log_body = [] # report log, type is (severity, line, col, msg) + + def _add_log(self, severity, line, col, msg): + """Add log.""" + if self._is_stub: + return + if line is None and col is None: + self._log_head.append(msg) + return + if isinstance(line, int) and isinstance(col, int): + self._log_body.append((severity, line, col, msg)) + if self._max_line < line: + self._max_line = line + else: + raise 
TypeError('【错误】参数类型存在错误') + + def info(self, line, col, msg): + """Interface to add infer log""" + self._add_log(logging.INFO, line, col, msg) + + def warning(self, line, col, msg): + """Interface to add warning log""" + self._add_log(logging.WARNING, line, col, msg) + + def header_msg(self, msg): + """Interface to add header message log""" + self._add_log(logging.INFO, None, None, msg) + + def get_logs(self): + """Get convert logs""" + logs = [] + logs.extend(self._log_head) + # sort rule: line * self._max_line + col + self._log_body.sort(key=lambda log: log[1] * self._max_line + log[2]) + for log_info in self._log_body: + log_info = "行 %d:列 %d: %s" % (log_info[1], log_info[2], log_info[3]) + if logs: + # Deduplication for logs + if logs[-1] != log_info: + logs.append(log_info) + else: + logs.append(log_info) + return logs + + +class _LineColEditVisitor(ast.NodeVisitor): + """ + Update line number and col offset of ast node. + + Use the line and column number of the original code to update + the line and column number of the new code replaced with the original code. 
+ """ + + class _NodeInfo: + """NodeInfo class definition.""" + + def __init__(self, node): + self.node = node + self.call_list = [] # Used to save all ast.Call node in self._node + + def __init__(self): + self._dst_node_info = None + self._src_node_info = None + self._visiting = self._src_node_info # Used to point to the visiting node + + def update(self, replace_with_node, src_node): + """Update the line and column number of the new code replaced with the original code.""" + replace_with_node.lineno = src_node.lineno + replace_with_node.col_offset = src_node.col_offset + self._dst_node_info = self._NodeInfo(replace_with_node) + self._src_node_info = self._NodeInfo(src_node) + self._visiting = self._src_node_info + self.visit(self._visiting.node) + + self._visiting = self._dst_node_info + self.visit(self._visiting.node) + + self._update_line_col() + + def visit_Call(self, node): + """Callback function when visit AST tree""" + self._visiting.call_list.append(node) + self.generic_visit(node) + + def _update_line_col(self): + """Update the line and column number information for all ast.Call node.""" + dst_call_list = list(self._dst_node_info.call_list) + src_call_list = list(self._src_node_info.call_list) + len_diff = len(dst_call_list) - len(src_call_list) + + # After MindSpore api replaces Torch api, more calls are generated. + # For example, out.view() is replaced with P.Reshape()(out). + # out.view() has only one call, but P.Reshape()(out) has two calls. + # To match the replaced calls, the calls of out.view is padded to the same quantity. + if len_diff > 0: + src_call_list = [src_call_list[0]] * len_diff + src_call_list + + for dst_call, src_call in zip(dst_call_list, src_call_list): + dst_call.lineno = src_call.lineno + dst_call.col_offset = src_call.col_offset + + if not dst_call.args: + continue + + # When out.size().view(1, ...) 
transforms to P.Reshape()(out.size(), 1, ...), + # in this case, the column of parameter out.size() will be bigger than the following parameters. + # To ensure the sequence of parameters, adjust the column of the second parameter. + args = [] + for arg in dst_call.args: + if self._check_arg2update(arg): + args.append(arg) + for arg in args: + # line number starts from 1, column number starts from 0. + arg.lineno += dst_call.lineno - 1 + arg.col_offset += dst_call.col_offset + + @staticmethod + def _check_arg2update(arg): + # When the arg is a function call, its col_offset is handled separately. + if not isinstance(arg, ast.Call): + return True + return False + + +class AstEditVisitor(ast.NodeVisitor): + """AST Visitor that process function calls. + + Converts function calls from torch api to MindSpore api using api mapping information. + """ + + def __init__(self): + self._process_log = _ConvertReport() + self._tree = None + self._code_analyzer = None + self._stack = [] # Used to easily access the parent node + self._forward_list = {} + self._is_forward_function = False # Used to allow access the visiting function forward attribute + self._new_call_nodes = [] # Used to save new ast.call nodes + + def process(self, ast_tree): + """ + Convert source code to MindSpore code. + + Args: + ast_tree (AST): The root node of the source code. + """ + self.__init__() + self._tree = ast_tree + self._code_analyzer = CodeAnalyzer() + self._code_analyzer.process(self._tree) + + self._forward_list = ForwardCall(self._tree).calls + # replace python function under nn.Module + self._convert_api() + + # replace external reference statements + self._convert_external_reference() + + def get_logs(self): + """Get conversion report.""" + return self._process_log.get_logs() + + def _convert_cell(self, cell_scope): + """ + Convert a PyTorch Module class into MindSpore Cell class. + + Args: + cell_scope (pasta.base.Scope): The network class definition node inherits from torch.nn.Module. 
+ """ + cell_ast_node = cell_scope.node + line_no = cell_ast_node.lineno + logger.info("Line %3d: 开始进行转换 nn.Module %s", line_no, self._code_analyzer.get_name(cell_ast_node)) + + class_elements = self._code_analyzer.network_definitions()['cell'] + # 1. 首先需要更新函数的定义 + for func_scope in class_elements.get(cell_scope, []): + self._update_function_def(func_scope) + + # 2. 更新类别的base name + self._update_base_name(cell_scope) + + def _update_base_name(self, class_def_scope): + """ + Update base name of class. + + Args: + class_def_scope (ast.ClassDef): Class definition node. + """ + base_name_mapping = APIAnalysisSpec.base_name_mapping + class_def_node = class_def_scope.node + base_class_nodes = class_def_scope.node.bases + # 更新类别的base name + for base_class_node in base_class_nodes: + base_name = base_class_node.attr + if base_name in APIAnalysisSpec.get_network_base_class_names(): + old_code = pasta.dump(base_class_node) + if base_name in base_name_mapping: + new_code = 'nn.' + base_name_mapping[base_class_node.attr] + new_node = pasta.parse(new_code) + pasta.ast_utils.replace_child(class_def_node, base_class_node, new_node) + self._process_log.info(base_class_node.lineno, base_class_node.col_offset, LOG_FMT_CONVERT % + (old_code, new_code)) + else: + self._process_log.info(base_class_node.lineno, base_class_node.col_offset, LOG_FMT_NOT_CONVERT % + (old_code, '')) + + @staticmethod + def _modify_function_name(func_def_node, new_func_name): + """Modify function name""" + if not isinstance(func_def_node, ast.FunctionDef): + raise NodeTypeNotSupport('【错误】该结点不是一个 ast.FunctionDef 节点类型,您可以查看对应算子的Mapping表是否存在问题。') + + old_func_name = func_def_node.name + func_def_node.name = new_func_name + + # Modify formatting information stored by pasta + old_function_def = fmt.get(func_def_node, 'function_def') + if old_function_def: + new_function_def = old_function_def.replace(old_func_name, new_func_name) + fmt.set(func_def_node, 'function_def', new_function_def) + fmt.set(func_def_node, 
'name__src', new_func_name) + + def _update_function_def(self, func_scope): + """ + Convert a PyTorch function into MindSpore function. + + Args: + func_scope (pasta.base.scope.Scope): The node scope of function definition. + """ + is_forward = self._judge_forward(func_scope) + # 1. 转换函数的内容 + self._convert_function(func_scope, is_forward) + + # 2. 如果是forward函数则修改函数的名称 + func_ast_node = func_scope.node + old_func_name = 'forward' + new_func_name = 'construct' + if func_ast_node.name == old_func_name: + self._modify_function_name(func_ast_node, new_func_name) + real_line_number = self._get_real_line_number(func_ast_node) + self._process_log.info(real_line_number, func_ast_node.col_offset, + LOG_FMT_CONVERT % (old_func_name, new_func_name)) + + def _convert_api(self): + """Convert PyTorch api call to MindSpore api call in a function.""" + tasks = [] + found_func_nodes = [] + convert_elements = self._code_analyzer.network_definitions() + for func_node_scope in convert_elements.get("functions", []): + found_func_nodes.append(func_node_scope.node) + is_forward = self._judge_forward(func_node_scope) + tasks.append((self._convert_function, (func_node_scope, is_forward))) + for class_scope, func_scopes in convert_elements.get("cell", []).items(): + for func_node_scope in func_scopes: + found_func_nodes.append(func_node_scope.node) + tasks.append((self._convert_cell, (class_scope,))) + + # Some functions in the forward call chain are not found by self._code_analyzer. 
+ for func_node in self._forward_list.values(): + is_forward = True + if func_node and func_node not in found_func_nodes: + func_node_scope = self._code_analyzer.lookup_scope(func_node) + tasks.append((self._convert_function, (func_node_scope, is_forward))) + + for convert_fun, args in tasks: + convert_fun(*args) + + @staticmethod + def _dump_without_prefix(node): + """Get the python source for an AST.""" + pos = 0 + source_prefix = pasta.base.formatting.get(node, 'prefix') + if source_prefix: + pos = len(source_prefix) + source_code = pasta.dump(node) + return source_code[pos:] + + @staticmethod + def _get_real_line_number(node): + """Get the real line number of the node.""" + try: + line_number = node.lineno + len(node.decorator_list) + except AttributeError: + line_number = node.lineno + return line_number + + def _replace_external_reference(self): + """ + Replace external reference statements. + + Returns: + dict, key is external name, value is the new replaced node. + """ + all_name_mappings = APIAnalysisSpec.import_name_mapping + names_replaced_with = dict() + for ref_info in self._code_analyzer.external_references.values(): + external_ref_info = ref_info['external_ref_info'] + import_node = ref_info['parent_node'] + if import_node is None: + continue + code = self._dump_without_prefix(import_node) + import_parent_node = self._code_analyzer.root_scope.parent(import_node) + # replace import with new name + if external_ref_info.name in APIAnalysisSpec.get_convertible_external_names(): + external_ref_info = ref_info['external_ref_info'] + if external_ref_info.name in all_name_mappings.keys(): + replace_info = all_name_mappings[external_ref_info.name] + new_node = self._make_import(name_to_import=replace_info[0], as_name=replace_info[1]) + new_code = pasta.dump(new_node) + pasta.ast_utils.replace_child(import_parent_node, import_node, new_node) + names_replaced_with.update({external_ref_info.name: new_node}) + self._process_log.info(import_node.lineno, 
import_node.col_offset, LOG_FMT_CONVERT % + (code.strip(), new_code.strip())) + elif external_ref_info.name.startswith('torch.'): + self._process_log.warning(import_node.lineno, import_node.col_offset, LOG_FMT_NOT_CONVERT % + (code.strip(), LOG_SUGGESTION_MANUAL_CONVERT)) + else: + pass + return names_replaced_with + + def _convert_external_reference(self): + """Convert import statements.""" + all_name_mappings = APIAnalysisSpec.import_name_mapping + + # 1. 首先更改外层引用名 + names_replaced_with = self._replace_external_reference() + new_import_node = dict() + insert_pos = 0 + # 2. 找出源码中未找到的剩余映射名称 + for src_name, new_import_name in all_name_mappings.items(): + if src_name not in names_replaced_with: + new_node = self._make_import(name_to_import=new_import_name[0], as_name=new_import_name[1]) + new_import_node.update({insert_pos: new_node}) + insert_pos += 1 + else: + try: + # 如果最后一个名字被替换,则在最后一个名字后面插入 pos + replaced_with_node = names_replaced_with[src_name] + insert_pos = self._tree.body.index(replaced_with_node) + 1 + except ValueError: + pass + + # 3. 按顺序导入外层引用库 + insert_cnt = 0 + for insert_pos, new_node in new_import_node.items(): + # 将节点插入模块 + self._tree.body.insert(insert_pos + insert_cnt, new_node) + new_code = self._dump_without_prefix(new_node) + self._process_log.header_msg(LOG_FMT_INSERT % new_code.strip()) + insert_cnt += 1 + + @staticmethod + def _make_import(name_to_import, as_name=None): + """ + Create an import to the ast tree. + + Args: + name_to_import: (string) The absolute name to import. + as_name: (string) The alias for the import ("import name_to_import as asname") + + Returns: + ast.Import, a new ast.Import node. + """ + new_alias = ast.alias(name=name_to_import, asname=as_name) + import_node = ast.Import(names=[new_alias]) + return import_node + + def _convert_function(self, func_scope, is_forward): + """ + Convert a PyTorch function into MindSpore function. + + Args: + func_scope (pasta.base.scope.Scope): The node scope of function definition. 
+ is_forward (boolean): If the function is defined in forward function in nn.Module in torch. + """ + func_ast_node = func_scope.node + line_no = func_ast_node.lineno + logger.info("Line %3d: start converting function %s()", line_no, func_ast_node.name) + + parent = func_scope.parent_scope.node + self._stack.clear() + self._new_call_nodes.clear() + if parent: + self._stack.append(parent) + + self._is_forward_function = is_forward + self.visit(func_scope.node) + + def _judge_forward(self, func_scope): + """ + Check if function is a forward function. + + Args: + func_scope (pasta.base.scope.Scope): The node scope of function definition. + + Returns: + boolean, True or False + """ + is_forward = func_scope.node in self._forward_list.values() + if is_forward: + logger.debug("%s is a forward function", self._code_analyzer.get_name(func_scope)) + return is_forward + + # 重写以维护堆栈信息以访问父节点 + def visit(self, node): + """Visit a ast tree.""" + self._stack.append(node) + super(AstEditVisitor, self).visit(node) + self._stack.pop() + + def _mapping_standard_api_name(self, api_name): + """Get mapping from external reference name to standard external reference name""" + standard_name = api_name + if not self._code_analyzer.is_standard_external_ref: + # key:表示实际的ref 名称,value:标准的ref 名称 + mapping_names = self._mapping_standard_external_ref() + api_name_parts = api_name.split('.') + api_name_parts[0] = mapping_names.get(api_name_parts[0], api_name_parts[0]) + standard_name = '.'.join(api_name_parts) + return standard_name + + def _infer_api_name(self, call_func_node, check_context=True): + """Infer the call name. + + Examples: + 1. nn.Sequential inferred to nn.Sequential + 2. mmm.size inferred to .size if import torch.nn as nn + 3. 
mmm.size inferred to mmm.size if import torch.nn as mmm + """ + match_case = ApiMatchingEnum.NOT_API + api_name = None + call_name = pasta.dump(call_func_node) + + is_include_sub_call = self._is_include_sub_call(call_func_node) + if is_include_sub_call: + # x.y().z splits to ['x.y()', 'z'] + name_attributes = call_name.rsplit('.', 1) + else: + # x.y.z splits to ['x', 'y', 'z'] + name_attributes = call_name.split('.') + + # rewritten external module name + # e.g., mm.ReLU will be written to nn.ReLU if 'import torch.nn as mm' in script. + if check_context and not self._code_analyzer.is_standard_external_ref: + standard_name = self._mapping_standard_api_name(name_attributes[0]) + else: + standard_name = name_attributes[0] + + if standard_name in ["nn", "F", "torch", "optim"]: + match_case = ApiMatchingEnum.API_STANDARD + api_name = call_name + else: + # only infer function for tensor object. + # e.g., api_call_name is out.view, .view is an api name for out which is maybe a tensor object. + # e.g., 'xxxx'.size can be not inferred to .size, because string is not a tensor object. + if self._check_tensor_object(call_func_node): + api_name = '.' + name_attributes[-1] + match_case = ApiMatchingEnum.API_INFER + return api_name, match_case + + def _check_tensor_object(self, node): + """Check whether the reference object of the node is a tensor object.""" + if not isinstance(node, (ast.Attribute, ast.Name)): + return False + name_attributes = self._dump_without_prefix(node).split('.') + node_ref_name = name_attributes[0] + if re.search(r'\W', node_ref_name) or len(name_attributes) == 1: + return False + + func_name = '.' + name_attributes[-1] + if func_name not in TENSOR_DOT_LIST: + return False + + is_tensor_object = True + if self._code_analyzer: + # Check whether the object is external reference. 
+ for ref_name in self._code_analyzer.external_references: + if node_ref_name == ref_name: + is_tensor_object = False + break + return is_tensor_object + + @staticmethod + def _is_include_sub_call(call_func_node): + """"Inspect a sub call in call expression. + + Examples: + 1. nn.functional.relu() return False + 2. nn.functional.relu(out).size() return True. nn.functional.relu(out) is sub call. + 3. nn.functional.relu(out=out.size()).size() return False. out.size() is not sub call of argument. + """ + is_include_call = False + try: + sub_node = call_func_node + while sub_node and not isinstance(sub_node, ast.Call): + sub_node = sub_node.value + if isinstance(sub_node, ast.Call): + is_include_call = True + except AttributeError: + is_include_call = False + return is_include_call + + def match_api(self, call_func_node, is_forward, check_context=True): + """ + Check api name to convert, check api name ok with a is_forward condition. + + Args: + call_func_node (ast.Attribute): The call.func node. + is_forward (bool): whether api belong to forward. + check_context (boolean): If True, the code context will be checked. Default is True. + + Returns: + str, the standard api name used to match. + ApiMappingEnum, the match result. + """ + match_case = ApiMatchingEnum.NOT_API + api_call_name = pasta.dump(call_func_node) + + if api_call_name.startswith('self.'): + return api_call_name, match_case + + api_name, match_case = self._infer_api_name(call_func_node, check_context) + api_call_name = pasta.dump(call_func_node) + + is_tensor_obj_call = False + if api_name != api_call_name: + is_tensor_obj_call = True + + standard_api_call_name = api_name + # rewritten external module name + # e.g., mm.ReLU will be written to nn.ReLU if 'import torch.nn as mm' in script. 
+ if not is_tensor_obj_call: + standard_api_call_name = self._get_api_whole_name(call_func_node, check_context) + # print("++++++DEBUG_NAME3+++++") + # print(standard_api_call_name) + if standard_api_call_name in ALL_TORCH_APIS: + match_case = ApiMatchingEnum.API_FOUND + # print("+++++++DEBUG_NAME4+++++") + # print(standard_api_call_name) + # Debug is_forward + if (not is_forward and standard_api_call_name in ALL_TORCH_APIS) or \ + (is_forward and standard_api_call_name in ALL_2P_LIST): + # print("********DEBUG********") + # print(standard_api_call_name) + # print(match_case) + # print(is_forward) + match_case = ApiMatchingEnum.API_MATCHED + # print("********DEBUG********") + else: + if standard_api_call_name and standard_api_call_name.startswith('torch.nn.init'): + match_case = ApiMatchingEnum.API_MATCHED + return standard_api_call_name, match_case + + @staticmethod + def _get_call_parameters_str(call_node): + """Get parameters string for a call node.""" + if not isinstance(call_node, ast.Call): + raise NodeTypeNotSupport('It is not ast.Call node type.') + parameters_str = '' + call_str = pasta.dump(call_node) + call_name = pasta.dump(call_node.func) + last_parameter_str = '' + + if call_node.args: + last_parameter_str = pasta.dump(call_node.args[-1]) + if call_node.keywords: + last_parameter_str = pasta.dump(call_node.keywords[-1]) + if last_parameter_str: + left_parenthesis_pos = call_str.find(call_name) + len(call_name) + # call is like abc.call(a, b,), last parameter is b, + # but parameters string must have last ',' character after the last parameter b. + last_parameter_pos = call_str.rfind(last_parameter_str) + len(last_parameter_str) + right_parenthesis_pos = call_str.find(')', last_parameter_pos) + + # parameters start pos must skip '(' character for calling. 
+ parameters_str = call_str[left_parenthesis_pos + 1:right_parenthesis_pos] + return parameters_str + + def _get_api_whole_name(self, call_func_node, check_context=True): + """ + Get the whole name for the call node. + + Args: + call_func_node (AST): The func attribute of ast.Call. + check_context (boolean): If True, the code context will be checked. Default is True. + + Returns: + str, the whole name. + """ + api_name, match_case = self._infer_api_name(call_func_node, check_context) + if match_case == ApiMatchingEnum.API_STANDARD: + api_name_splits = api_name.split('.') + api_name_splits[0] = self._get_external_ref_whole_name(api_name_splits[0]) + if api_name_splits[0]: + api_name = '.'.join(api_name_splits) + return api_name + + def mapping_api(self, call_node, check_context=True): + """ + Convert api_name in code to MindSpore api, if api_name is a python api, code will not convert. + + If do not check context of the script, the code represented by the node must be written in the standard way. + + Args: + call_node (ast.Call): The ast node to convert. + check_context (boolean): If True, the code context will be checked. Default is True. + + Returns: + str, the converted code. + """ + if not isinstance(call_node, ast.Call): + raise NodeTypeNotSupport("It is not ast.Call node.") + code = pasta.dump(call_node) + api_call_name = pasta.dump(call_node.func) + if api_call_name.startswith('self.'): + return code + + new_code = self._mapping_api(call_node, check_context) + + return new_code + + def _mapping_api(self, call_node, check_context=True): + """ + Convert api_name in code to MindSpore api, if api_name is a python api, code will not convert. + + If do not check context of the script, the code represented by the node must be written in the standard way. + + Args: + call_node (ast.Call): The ast node to convert. + check_context (boolean): If True, the code context will be checked. Default is True. + + Returns: + str, the converted code. 
+ """ + code = pasta.dump(call_node) + api_call_name = pasta.dump(call_node.func) + + # find full api expected to be converted. eg:expr="nn.Conv2d(1,2,3)" args_str="(1,2,3)" + args_str = '(' + self._get_call_parameters_str(call_node) + ')' + + try: + api_name, _ = self._infer_api_name(call_node.func, check_context) + standard_api_call_name = api_call_name + if api_name != api_call_name: + # api name .view inferred from out.view, split tensor object name is out + tensor_obj_name = api_call_name[:-len(api_name)] + map_helper = ALL_MAPPING[api_name] + new_code = map_helper.convert(tensor_obj_name, args_str) + else: + # change to external ref name + # e.g., mm.ReLU will be changed to nn.ReLU if 'import torch.nn as mm' in script. + if check_context and not self._code_analyzer.is_standard_external_ref: + standard_api_call_name = self._mapping_standard_api_name(api_name) + + map_helper = ALL_MAPPING[standard_api_call_name] + new_code = map_helper.convert(standard_api_call_name, args_str) + except KeyError: + return code + + return new_code + + @staticmethod + def _get_detail_prompt_msg(old_node, new_node): + """Get detail converted prompt information.""" + msg = None + if isinstance(old_node, ast.Call) and isinstance(new_node, ast.Call): + old_api_name = pasta.dump(old_node.func) + new_api_name = pasta.dump(new_node.func) + if new_api_name == old_api_name: + old_parameter_num = len(old_node.args) + len(old_node.keywords) + new_parameter_num = len(new_node.args) + len(new_node.keywords) + if old_parameter_num > 1: + msg = '参数已转换,本算子MindSpore与Pytorch存在一些差异,参考资料:' + else: + if old_parameter_num == 0 and new_parameter_num == 0: + msg = '该API名称已经被转换为了MindSporeAPI名称' + else: + msg = '参数已转换,本算子MindSpore与Pytorch存在一些差异,参考资料:' + return msg + + def _convert_call(self, node, matched_api_name): + """"Convert the call node.""" + # print("********DEBUG********") + # print(node) + # print(matched_api_name) + # print("********DEBUG********") + new_node = None + code = pasta.dump(node) + 
api_name = pasta.dump(node.func) + warning_info = get_prompt_info(matched_api_name) + if warning_info is None: + warning_info = '' + if matched_api_name in ALL_MAPPING: + # print("********DEBUG_ISCONVERT********") + logger.info("Line %3d start converting API: %s", node.lineno, api_name) + new_code = self.mapping_api(node) + if new_code != code: + try: + new_node = pasta.parse(new_code).body[0].value + # find the first call name + new_api_name = new_code[:new_code.find('(')] + detail_msg = self._get_detail_prompt_msg(node, new_node) + if detail_msg: + warning_info = detail_msg + ' ' + warning_info + except AttributeError: + new_node = pasta.parse(new_code).body[0] + new_api_name = new_code + self._process_log.info(node.lineno, node.col_offset, + LOG_FMT_CONVERT_WITH_TIPS % (api_name, new_api_name, warning_info)) + else: + logger.warning("Line %3d: 该处找到不支持的API算子: %s%s", node.lineno, api_name, warning_info) + self._process_log.warning(node.lineno, node.col_offset, LOG_FMT_NOT_CONVERT % (api_name, warning_info)) + + return new_node + + def visit_Call(self, node): + """Callback function when visit AST tree""" + code = pasta.dump(node) + api_name = pasta.dump(node.func) + + # The parent node first call is equal to this node, skip when parent node is replaced. + # This scenario occurs, for example, when out.view(out.size(0), -1) is first converted to + # P.Reshape()(out, (out.size(0). -1)), will skip P.Reshape() in following visiting. + # Access from the penultimate element in reverse order. 
+ for parent_node in self._stack[-2::-1]: + if parent_node in self._new_call_nodes and pasta.dump(parent_node).startswith(api_name): + return + parent = self._stack[-2] + new_node = None + new_code = code + matched_api_name, match_case = self.match_api(node.func, self._is_forward_function) + # print("********DEBUG********") + # print(matched_api_name) + # print(match_case) + # print("********DEBUG********") + if match_case in [ApiMatchingEnum.API_INFER, ApiMatchingEnum.API_MATCHED]: + new_node = self._convert_call(node, matched_api_name) + elif match_case in [ApiMatchingEnum.API_STANDARD, ApiMatchingEnum.API_FOUND]: + self._process_log.warning(node.lineno, node.col_offset, LOG_FMT_NOT_CONVERT % (api_name, '')) + else: + pass + + if parent and new_node: + update_line_col = _LineColEditVisitor() + update_line_col.update(new_node, node) + pasta.ast_utils.replace_child(parent, node, new_node) + self._new_call_nodes.append(new_node) + + node = new_node + self._stack[-1] = node + try: + self.generic_visit(node) + except Exception: + logger.error('original code:%s, new code:%s', code, new_code, exc_info=True) + raise + + def _mapping_standard_external_ref(self): + """Obtain the mapping dict of mapping the external references to standard external references.""" + renames = {} + external_refs = self._code_analyzer.external_references + for ref_name, ref_info in external_refs.items(): + external_ref_info = ref_info['external_ref_info'] + if ref_name != 'nn' and external_ref_info.name == 'torch.nn': + renames[ref_name] = 'nn' + elif ref_name != 'F' and external_ref_info.name == 'torch.nn.functional': + renames[ref_name] = 'F' + elif ref_name != 'optim' and external_ref_info.name == 'torch.optim': + renames[ref_name] = 'optim' + return renames + + def _get_external_ref_whole_name(self, ref_name): + """ + Find out external reference whole name. 
+ + For example: + In the parsed source code, there is import statement + import torch.nn as new_name + _get_external_ref_whole_name('new_name') will return 'torch.nn' string. + """ + external_refs = self._code_analyzer.external_references + for external_ref_name, ref_info in external_refs.items(): + external_ref_info = ref_info['external_ref_info'] + if external_ref_name == ref_name: + return external_ref_info.name + return None + + def _check_isinstance_parameter(self, node): + """Check whether the second parameter of isinstance function contains the torch type.""" + is_isinstance_arg = False + # Check whether node is the second parameter of the isinstance function call. + # Access from the penultimate element in reverse order. + for parent_node in self._stack[-2::-1]: + if isinstance(parent_node, ast.Call) and pasta.dump(parent_node.func) == 'isinstance': + isinstance_node = parent_node + seconde_arg_type_nodes = [] + if isinstance(isinstance_node.args[1], ast.Tuple): + seconde_arg_type_nodes.extend(isinstance_node.args[1].elts) + else: + seconde_arg_type_nodes.append(isinstance_node.args[1]) + if node in seconde_arg_type_nodes: + is_isinstance_arg = True + break + if not is_isinstance_arg: + return False + + isinstance_type_arg = pasta.dump(node) + check_torch_type = False + if isinstance_type_arg: + type_splits = isinstance_type_arg.split('.') + whole_name = self._get_external_ref_whole_name(type_splits[0]) + if whole_name and whole_name.startswith('torch'): + check_torch_type = True + if check_torch_type: + _, match_case = self.match_api(node, False) + if match_case != ApiMatchingEnum.NOT_API: + warn_info = '该算子无法识别,可能是无参数函数' + self._process_log.warning(node.lineno, node.col_offset, + LOG_FMT_NOT_CONVERT % (isinstance_type_arg, warn_info)) + return check_torch_type + + def visit_Attribute(self, node): + """Callback function when visit AST tree""" + self._check_isinstance_parameter(node) + self.generic_visit(node) diff --git 
a/mindconverter/mindconverter/cli.py b/mindconverter/mindconverter/cli.py new file mode 100644 index 0000000000000000000000000000000000000000..ece89b50c33e0b18f65b84b97914a0c770f337e9 --- /dev/null +++ b/mindconverter/mindconverter/cli.py @@ -0,0 +1,215 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Command module.""" +import os +import sys +import argparse + +import mindconverter +from mindconverter.converter import main + + +class FileDirAction(argparse.Action): + """File directory action class definition.""" + + @staticmethod + def check_path(parser, values, option_string=None): + """ + Check argument for file path. + + Args: + parser (ArgumentParser): Passed-in argument parser. + values (object): Argument values with type depending on argument definition. + option_string (str): Optional string for specific argument name. Default: None. + """ + outfile = values + if outfile.startswith('~'): + outfile = os.path.realpath(os.path.expanduser(outfile)) + + if not outfile.startswith('/'): + outfile = os.path.realpath(os.path.join(os.getcwd(), outfile)) + + if os.path.exists(outfile) and not os.access(outfile, os.R_OK): + parser.error(f'{option_string} {outfile} not accessible') + return outfile + + def __call__(self, parser, namespace, values, option_string=None): + """ + Inherited __call__ method from argparse.Action. 
+ + Args: + parser (ArgumentParser): Passed-in argument parser. + namespace (Namespace): Namespace object to hold arguments. + values (object): Argument values with type depending on argument definition. + option_string (str): Optional string for specific argument name. Default: None. + """ + outfile_dir = self.check_path(parser, values, option_string) + if os.path.isfile(outfile_dir): + parser.error(f'{option_string} {outfile_dir} is a file') + + setattr(namespace, self.dest, outfile_dir) + + +class OutputDirAction(argparse.Action): + """File directory action class definition.""" + + def __call__(self, parser, namespace, values, option_string=None): + """ + Inherited __call__ method from argparse.Action. + + Args: + parser (ArgumentParser): Passed-in argument parser. + namespace (Namespace): Namespace object to hold arguments. + values (object): Argument values with type depending on argument definition. + option_string (str): Optional string for specific argument name. Default: None. + """ + output = values + if output.startswith('~'): + output = os.path.realpath(os.path.expanduser(output)) + + if not output.startswith('/'): + output = os.path.realpath(os.path.join(os.getcwd(), output)) + + if os.path.exists(output): + if not os.access(output, os.R_OK): + parser.error(f'{option_string} {output} not accessible') + + if os.path.isfile(output): + parser.error(f'{option_string} {output} is a file') + + setattr(namespace, self.dest, output) + + +class InFileAction(argparse.Action): + """Input File action class definition.""" + + def __call__(self, parser, namespace, values, option_string=None): + """ + Inherited __call__ method from argparse.Action. + + Args: + parser (ArgumentParser): Passed-in argument parser. + namespace (Namespace): Namespace object to hold arguments. + values (object): Argument values with type depending on argument definition. + option_string (str): Optional string for specific argument name. Default: None. 
+ """ + outfile_dir = FileDirAction.check_path(parser, values, option_string) + if not os.path.exists(outfile_dir): + parser.error(f'{option_string} {outfile_dir} not exists') + + if not os.path.isfile(outfile_dir): + parser.error(f'{option_string} {outfile_dir} is not a file') + + setattr(namespace, self.dest, outfile_dir) + + +class LogFileAction(argparse.Action): + """Log file action class definition.""" + + def __call__(self, parser, namespace, values, option_string=None): + """ + Inherited __call__ method from FileDirAction. + + Args: + parser (ArgumentParser): Passed-in argument parser. + namespace (Namespace): Namespace object to hold arguments. + values (object): Argument values with type depending on argument definition. + option_string (str): Optional string for specific argument name. Default: None. + """ + outfile_dir = FileDirAction.check_path(parser, values, option_string) + if os.path.exists(outfile_dir) and not os.path.isdir(outfile_dir): + parser.error(f'{option_string} {outfile_dir} is not a directory') + setattr(namespace, self.dest, outfile_dir) + + +def cli_entry(): + """Entry point for mindconverter CLI.""" + + permissions = os.R_OK | os.W_OK | os.X_OK + os.umask(permissions << 3 | permissions) + + parser = argparse.ArgumentParser( + prog='mindconverter', + description='MindConverter CLI entry point (version: {})'.format(mindconverter.__version__)) + + parser.add_argument( + '--version', + action='version', + version='%(prog)s ({})'.format(mindconverter.__version__), + help="显示Mindconverter的版本信息") + + parser.add_argument( + '--in_file', + type=str, + action=InFileAction, + required=True, + help=""" + 指定Pytorch模型脚本文件的路径。 + """) + + parser.add_argument( + '--output', + type=str, + action=OutputDirAction, + default=os.path.join(os.getcwd(), 'output'), + help=""" + 指定转换后的模型文件目录的路径。 + 默认是当前工作目录中的output目录。 + """) + + parser.add_argument( + '--report', + type=str, + action=LogFileAction, + default=os.getcwd(), + help=""" + 指定报告的输出目录,默认是当前工作目录。 + """) 
+ + argv = sys.argv[1:] + if not argv: + argv = ['-h'] + args = parser.parse_args(argv) + else: + args = parser.parse_args() + mode = permissions << 6 + os.makedirs(args.output, mode=mode, exist_ok=True) + os.makedirs(args.report, mode=mode, exist_ok=True) + _run(args.in_file, args.output, args.report) + + +def _run(in_files, out_dir, report): + """ + Run converter command. + + Args: + in_files (str): The file path or directory to convert. + out_dir (str): The output directory to save converted file. + report (str): The report file path. + """ + files_config = { + 'root_path': in_files if in_files else '', + 'in_files': [], + 'outfile_dir': out_dir, + 'report_dir': report + } + if os.path.isfile(in_files): + files_config['root_path'] = os.path.dirname(in_files) + files_config['in_files'] = [in_files] + else: + for root_dir, _, files in os.walk(in_files): + for file in files: + files_config['in_files'].append(os.path.join(root_dir, file)) + main(files_config) \ No newline at end of file diff --git a/mindconverter/mindconverter/code_analysis.py b/mindconverter/mindconverter/code_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..18e0de228f147f2f4d06f89b715b30829fac96df --- /dev/null +++ b/mindconverter/mindconverter/code_analysis.py @@ -0,0 +1,406 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless REQUIRED by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================
+"""code analysis module"""
+import ast
+
+import pasta
+from pasta.base import scope
+
+from mindconverter.common.exceptions import ScriptNotSupport
+
+
+class APIAnalysisSpec:
+    """API analysis specifications"""
+
+    import_name_mapping = {'torch': ['mindspore.ops', None],
+                           'torch.nn': ['mindspore.nn', 'nn'],
+                           'torch.optim': ['mindspore.experimental.optim', 'optim'],
+                           'torch.nn.functional': ['mindspore.ops', 'ops']}
+
+    base_name_mapping = {'Module': 'Cell',
+                         'Sequential': 'SequentialCell'
+                         }
+
+    @classmethod
+    def get_convertible_external_names(cls):
+        """
+        Obtain the convertible external names.
+
+        The external name is the full dotted name being referenced.
+        """
+        return cls.import_name_mapping.keys()
+
+    @staticmethod
+    def get_network_base_class_names():
+        """Obtain the base names which network class base from"""
+        return ['Module',
+                'Sequential',
+                'ModuleList',
+                'ModuleDict',
+                'ParameterList',
+                'ParameterDict']
+
+    @staticmethod
+    def check_external_alias_ref(ref_name, external_name):
+        """
+        Check 'import as' is standard.
+
+        Standard references are follow:
+            import torch.nn as nn
+            import torch.nn.functional as F
+
+        Args:
+            ref_name (str): The name that refers to the external_name.
+            external_name (str): The full dotted name being referenced. For examples:
+                1. 'import torch.nn as nn', torch.nn is external_name, nn is ref_name.
+                2. 'from torch import nn as mm', torch.nn is external_name, mm is ref_name which is not a standard name.
+
+        Returns:
+            boolean, True if ref_name is standard else False.
+ """ + if ref_name != 'nn' and external_name == 'torch.nn': + is_standard = False + elif ref_name != 'F' and external_name == 'torch.nn.functional': + is_standard = False + elif ref_name != 'optim' and external_name == 'torch.optim': + is_standard = False + else: + is_standard = True + + return is_standard + + +class CodeAnalyzer(ast.NodeVisitor): + """Code analyzer that analyzes PyTorch python script by AST Visitor. + + CodeAnalyzer find the codes that need to be converted to MindSpore, + and provides the attributes related to the codes. + """ + + def __init__(self): + self._stack = [] # Used to easily access the parent node + self._external_references = {} + self._is_standard_external_ref = True + self._root_scope = None + # Used to save functions that need to be converted, value type is pasta.base.scope.Scope + self._network_functions = [] + + # Used to easily trace the function node + self._functions_stack = [] + + # key type is pasta.base.scope.Scope, value type is list + self._network_classes = {} + + @property + def root_scope(self): + """The root scope of the python script code.""" + return self._root_scope + + @property + def is_standard_external_ref(self): + """Obtain whether the result is a standard external reference.""" + return self._is_standard_external_ref + + @property + def external_references(self): + """Obtain all external references in the analyzed code.""" + return self._external_references + + def network_definitions(self): + """Obtain the network definitions which need to be converted.""" + return {"functions": self._network_functions, + "cell": self._network_classes} + + def process(self, ast_tree): + """ + Start to analyze the code. + + Args: + ast_tree (AST): The root node of the source code. 
+ """ + self.__init__() + self._root_scope = scope.analyze(ast_tree) + self._pre_process() + self.visit(ast_tree) + if not self._network_classes: + msg = "您的脚本中没有找到模型的定义部分" + raise ScriptNotSupport(msg) + + @staticmethod + def _check_external_standard(external_refs): + """Check whether all external references are standard.""" + is_standard = True + for external_name, external_ref_info in external_refs.items(): + is_standard = APIAnalysisSpec.check_external_alias_ref(external_name, external_ref_info.name) + if not is_standard: + break + return is_standard + + def _is_base_from_cell(self, node): + """ + Check whether the node bases from cell classes which are defined in APIAnalysisSpec. + + Args: + node (ast.ClassDef): The node which is a class definition. + + Returns: + boolean, True if the check result is Passed else False. + """ + if self._is_ref_convertible_imports(node): + whole_name = self._get_whole_name(node) + if whole_name.split('.')[-1] in APIAnalysisSpec.get_network_base_class_names(): + return True + return False + + def _pre_process(self): + """Preprocessor checks the code before analyzing.""" + is_torch = False + + # check whether the code imports torch. + for ref_name in self._root_scope.external_references.keys(): + if ref_name.split('.')[0] in APIAnalysisSpec.get_convertible_external_names(): + is_torch = True + break + if not is_torch: + msg = "源代码没有导入torch包,故此没有找到支持的模型的定义" + raise ScriptNotSupport(msg) + + # Find out external reference in the code and save it. 
+        external_refs = self._analyze_import_references(self._root_scope)
+        self._is_standard_external_ref = self._check_external_standard(external_refs)
+        for external_name, external_ref_info in external_refs.items():
+            self._external_references.update({
+                external_name: {
+                    'external_ref_info': external_ref_info,
+                    'parent_node': None
+                }
+            })
+
+    @staticmethod
+    def _analyze_import_references(root_scope):
+        """
+        Find out all references from the import statements.
+
+        Case1: (from)import alias, node_ref.name_ref.id is node_ref.name_ref.definition.asname.
+        Case2: import without alias, node_ref.name_ref.definition.asname is None.
+            e.g., import a.b.c, the reference definition id maybe is a, a.b or a.b.c.
+            The reference id a.b.c is really wanted.
+        """
+        external_name_ref = dict()
+        all_node_references = []
+        for node_references in root_scope.external_references.values():
+            all_node_references.extend(node_references)
+
+        for node_ref in all_node_references:
+            name_ref = node_ref.name_ref
+            if not name_ref:
+                continue
+            definition = name_ref.definition
+            if node_ref.name_ref.id in [definition.asname, definition.name]:
+                external_name_ref[name_ref.id] = node_ref
+
+        return external_name_ref
+
+    def visit(self, node):
+        """Overridden visit of the base class to maintain stack information to access parent node."""
+        self._stack.append(node)
+        super(CodeAnalyzer, self).visit(node)
+        self._stack.pop()
+
+    @staticmethod
+    def _get_full_name(node):
+        """Get the full name of the node."""
+        if not isinstance(node, (ast.Attribute, ast.Name)):
+            return None
+        return pasta.dump(node)
+
+    def _get_whole_name(self, node):
+        """
+        Get the whole name of the node.
+
+        For example, nn.Module is spliced from two nodes, nn node and Module node.
+        When visit ast nodes,
+        Module node is first visited, the full name is the same as the whole name, that is nn.Module.
+        And then nn node is visited, the full name is nn, the whole name is nn.Module.
+ """ + full_name = self._get_full_name(node) + if not full_name: + return None + whole_name = full_name + # node is in stack top pos + if node is self._stack[-1]: + parent_index = -1 + while isinstance(self._stack[parent_index], ast.Attribute): + parent_index -= 1 + + whole_name = self._get_full_name(self._stack[parent_index]) + return whole_name + + def _is_ref_convertible_imports(self, node): + """Check whether the node references convertible imports.""" + check_result = False + whole_name = self._get_whole_name(node) + if whole_name: + module_name = whole_name.split('.')[0] + for ref_name, ref_info in self._external_references.items(): + external_ref = ref_info['external_ref_info'] + # external reference is convertible module + if external_ref.name in APIAnalysisSpec.get_convertible_external_names(): + # import from the same external module + if module_name == ref_name.split('.')[0]: + check_result = True + break + + return check_result + + @staticmethod + def _get_external_node(external_references, only_convertible=False): + """Get all external reference nodes.""" + external_nodes = {} + for ref_name, ref_info in external_references.items(): + is_add = False + if only_convertible: + if ref_info['external_ref_info'].name in APIAnalysisSpec.get_convertible_external_names(): + is_add = True + else: + is_add = True + if is_add: + external_nodes.update({ref_info['external_ref_info'].node: ref_name}) + return external_nodes + + def _update_external_ref_parent(self, node): + """Set external reference parent node info.""" + external_nodes = self._get_external_node(self._external_references, only_convertible=False) + convertible_external_nodes = self._get_external_node(self._external_references, only_convertible=True) + for name_node in node.names: + if name_node in convertible_external_nodes.keys(): + if len(node.names) > 1: + msg = """\ + Not support multiple imports of torch on one line in your script. 
line:%s: %s + """ % (node.lineno, pasta.dump(node)) + raise ScriptNotSupport(msg) + if name_node in external_nodes.keys(): + ref_name = external_nodes[name_node] + self._external_references[ref_name]['parent_node'] = node + + @staticmethod + def _get_class_scope(node_scope): + """Find the class scope of the node_scope.""" + parent_scope = node_scope.parent_scope + class_scope = None + while parent_scope: + if isinstance(parent_scope.node, ast.ClassDef): + class_scope = parent_scope + break + parent_scope = parent_scope.parent_scope + return class_scope + + def _update_convertible_functions(self, node): + """Update convertible functions.""" + node_scope = self._root_scope.lookup_scope(node) + class_scope = self._get_class_scope(node_scope) + if class_scope: + network_classes = self._network_classes.get(class_scope, []) + if node_scope not in network_classes: + network_classes.append(node_scope) + else: + if node_scope not in self._network_functions: + self._network_functions.append(node_scope) + + def visit_ClassDef(self, node): + """Callback function when visit AST tree""" + if not self._stack[-1] is node: + return + + for base in node.bases: + if self._is_ref_convertible_imports(base): + self._network_classes[self._root_scope.lookup_scope(node)] = [] + + self.generic_visit(node) + + def _update_external_when_visit(self, node): + """Update external reference when visiting import and import from statements.""" + self._update_external_ref_parent(node) + self.generic_visit(node) + + def visit_Import(self, node): + """Callback function when visit AST tree""" + self._update_external_when_visit(node) + + def visit_ImportFrom(self, node): + """Callback function when visit AST tree""" + self._update_external_when_visit(node) + + def visit_Call(self, node): + """Callback function when visit AST tree""" + if not self._stack[-1] is node: + return + is_in_network_function = False + # If torch call is happened in the function, save the function for network definition. 
+ if self._functions_stack and self._is_ref_convertible_imports(node.func): + self._update_convertible_functions(self._functions_stack[-1]) + is_in_network_function = True + if not is_in_network_function: + self.generic_visit(node) + + def visit_FunctionDef(self, node): + """Callback function when visit AST tree""" + if not self._stack[-1] is node: + return + if node.name == "forward": + self._update_convertible_functions(node) + + self._functions_stack.append(node) + self.generic_visit(node) + self._functions_stack.pop() + + def get_name(self, node): + """ + Get the node name. + + Args: + node (AST): The ast node of the source code. + + Returns: + str, the name of the node + """ + if isinstance(node, pasta.base.scope.Scope): + items = [self.get_name(node.node)] + parent_scope = node.parent_scope + while parent_scope: + if not isinstance(parent_scope.node, ast.Module): + items.append(self.get_name(parent_scope.node)) + parent_scope = parent_scope.parent_scope + return '.'.join(reversed(items)) + if isinstance(node, (ast.ClassDef, ast.FunctionDef)): + return node.name + if isinstance(node, (ast.Name, ast.Attribute)): + return self._get_full_name(node) + return str(node) + + def lookup_scope(self, node): + """ + Search the scope of the node. + + Args: + node (AST): The ast node of the source code. + + Returns: + scope, the scope of the node + """ + if isinstance(node, pasta.base.scope.Scope): + return node + return self._root_scope.lookup_scope(node) diff --git a/mindconverter/mindconverter/common/__init__.py b/mindconverter/mindconverter/common/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e30774307ca2107b3a81c071ad33c042ef924790 --- /dev/null +++ b/mindconverter/mindconverter/common/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ diff --git a/mindconverter/mindconverter/common/exceptions.py b/mindconverter/mindconverter/common/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..637b76605df48427e66be201231aaba33bd5aad9 --- /dev/null +++ b/mindconverter/mindconverter/common/exceptions.py @@ -0,0 +1,54 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""Define custom exception.""" +from enum import unique + +from mindconverter.utils.constant import ScriptConverterErrors +from mindconverter.utils.exceptions import MindInsightException + + +@unique +class ConverterErrors(ScriptConverterErrors): + """Converter error codes.""" + SCRIPT_NOT_SUPPORT = 1 + NODE_TYPE_NOT_SUPPORT = 2 + CODE_SYNTAX_ERROR = 3 + + +class ScriptNotSupport(MindInsightException): + """The script can not support to process.""" + + def __init__(self, msg): + super(ScriptNotSupport, self).__init__(ConverterErrors.SCRIPT_NOT_SUPPORT, + msg, + http_code=400) + + +class NodeTypeNotSupport(MindInsightException): + """The astNode can not support to process.""" + + def __init__(self, msg): + super(NodeTypeNotSupport, self).__init__(ConverterErrors.NODE_TYPE_NOT_SUPPORT, + msg, + http_code=400) + + +class CodeSyntaxError(MindInsightException): + """The CodeSyntaxError class definition.""" + + def __init__(self, msg): + super(CodeSyntaxError, self).__init__(ConverterErrors.CODE_SYNTAX_ERROR, + msg, + http_code=400) diff --git a/mindconverter/mindconverter/common/log.py b/mindconverter/mindconverter/common/log.py new file mode 100644 index 0000000000000000000000000000000000000000..39578bdd10ef776840a2848c1a4f48085c80243c --- /dev/null +++ b/mindconverter/mindconverter/common/log.py @@ -0,0 +1,18 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Create a logger.""" +from mindconverter.utils.log import setup_logger + +logger = setup_logger("mindconverter", "mindconverter", console=False) diff --git a/mindconverter/mindconverter/conf/__init__.py b/mindconverter/mindconverter/conf/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ff947fe69147fc95f99f08182ac4658a419fd60f --- /dev/null +++ b/mindconverter/mindconverter/conf/__init__.py @@ -0,0 +1,150 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Conf module.""" + +import os +import json +import types +from importlib import import_module + + +class Settings: + """ + Definition of Settings class. 
+ + Examples: + >>> from mindinsight.conf import settings + >>> print(settings.PORT) + """ + + _prefix = 'MINDINSIGHT_' + _explicit_settings = set() + _default_settings = set() + + def __init__(self): + """Initialization of Settings.""" + self.load_from_defaults() + self.load_from_constants() + self.refresh() + + def refresh(self): + """Refresh settings from config file and environment variables.""" + self.update_from_file() + self.update_from_env() + + def load_from_defaults(self): + """Update settings from defaults module.""" + default_settings = import_module('mindconverter.conf.defaults') + for setting in dir(default_settings): + if setting.isupper(): + setattr(self, setting, getattr(default_settings, setting)) + self._default_settings.add(setting) + + def load_from_constants(self): + """Update settings from constants module""" + constant_settings = import_module('mindconverter.conf.constants') + for setting in dir(constant_settings): + if setting.isupper(): + setattr(self, setting, getattr(constant_settings, setting)) + + def update_from_file(self): + """Update settings from config file.""" + config_path = os.environ.get('MINDINSIGHT_CONFIG', '') + if not config_path: + return + + config_module = None + + # python:full.path.for.config.module + if config_path.startswith('python:'): + config_module = import_module(config_path[len('python:'):]) + + # file:full/path/for/config.py + elif config_path.startswith('file:'): + config_path = config_path[len('file:'):] + module_name = '__mindinsightconfig__' + config_module = types.ModuleType(module_name) + machinery = import_module('importlib.machinery') + loader = machinery.SourceFileLoader(module_name, config_path) + loader.exec_module(config_module) + + if config_module is None: + return + + for setting in dir(config_module): + if setting.isupper() and setting in self._default_settings: + setting_value = getattr(config_module, setting) + setattr(self, setting, setting_value) + self._explicit_settings.add(setting) + + 
def update_from_env(self): + """Update settings from environment variables.""" + for key, value in os.environ.items(): + if not key.startswith(self._prefix): + continue + + setting = key[len(self._prefix):] + if setting not in self._default_settings: + continue + + setting_value = getattr(self, setting) + if isinstance(setting_value, bool): + value = (value == 'True') + elif isinstance(setting_value, (int, float)): + value = type(setting_value)(value) + elif isinstance(setting_value, (list, dict)): + value = json.loads(value) + + setattr(self, setting, value) + self._explicit_settings.add(setting) + + def config_workspace(self, workspace): + """ + Config workspace value. + + Args: + workspace (str): Path of workspace. + """ + setattr(self, 'WORKSPACE', workspace) + self._explicit_settings.add('WORKSPACE') + + def is_overridden(self, setting_name): + """ + Check if specified setting is overridden. + + Args: + setting_name (str): Setting name to be checked. + + Returns: + bool, indicate whether given setting name is overridden. + """ + return setting_name in self._explicit_settings + + def dump(self): + """ + Dump settings data. + + Returns: + dict, json formatted data of settings. + """ + config = {} + for setting in dir(self): + if setting.isupper(): + config[setting] = getattr(self, setting) + + return config + + +settings = Settings() diff --git a/mindconverter/mindconverter/conf/constants.py b/mindconverter/mindconverter/conf/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..391182ac4678b5a78969e50b1f941521f5993b99 --- /dev/null +++ b/mindconverter/mindconverter/conf/constants.py @@ -0,0 +1,93 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Constants module for mindinsight settings.""" +import logging +import math +import os + + +_DEFAULT_MAX_THREADS_COUNT = 15 + + +def _calc_default_max_processes_cnt(): + """Calc default processes count.""" + + # We need to make sure every summary directory has a process to load data. + min_cnt = _DEFAULT_MAX_THREADS_COUNT + # Do not use too many processes to avoid system problems (eg. out of memory). + max_cnt = 45 + used_cpu_ratio = 0.75 + + cpu_count = os.cpu_count() + if cpu_count is None: + return min_cnt + + processes_cnt = math.floor(cpu_count * used_cpu_ratio) + + if processes_cnt < min_cnt: + return min_cnt + + if processes_cnt > max_cnt: + return max_cnt + + return processes_cnt + + +#################################### +# Global default settings. +#################################### +LOG_FORMAT = '[%(levelname)s] MI(%(process)d:%(thread)d,%(processName)s):%(asctime)s ' \ + '[%(filepath)s:%(lineno)d][%(sub_module)s] %(message)s' + +GUNICORN_ACCESS_FORMAT = "'%(t)s %(h)s <%(r)s> %(s)s %(b)s <%(f)s> <%(a)s> %(L)s '" + +LOG_LEVEL = logging.INFO +# rotating max bytes, default is 50M +LOG_ROTATING_MAXBYTES = 52428800 + +# rotating backup count, default is 30 +LOG_ROTATING_BACKUPCOUNT = 30 + +#################################### +# Web default settings. +#################################### +HOST = '127.0.0.1' + +# Allow to support cross origin resource sharing(CORS) enable. Default is disable. +# If enable CORS, `SUPPORT_REQUEST_METHODS` should enable 'OPTIONS' method. 
+ENABLE_CORS = False + +SUPPORT_REQUEST_METHODS = {'POST', 'GET', 'PUT', 'DELETE'} + +# api prefix should not end with slash, correct format is /v1/url +API_PREFIX = '/v1/mindinsight' + +#################################### +# Datavisual default settings. +#################################### +MAX_THREADS_COUNT = _DEFAULT_MAX_THREADS_COUNT +MAX_PROCESSES_COUNT = _calc_default_max_processes_cnt() + +MAX_TAG_SIZE_PER_EVENTS_DATA = 300 +DEFAULT_STEP_SIZES_PER_TAG = 500 + +MAX_GRAPH_TAG_SIZE = 10 +MAX_TENSOR_TAG_SIZE = 6 +MAX_IMAGE_STEP_SIZE_PER_TAG = 10 +MAX_SCALAR_STEP_SIZE_PER_TAG = 1000 +MAX_GRAPH_STEP_SIZE_PER_TAG = 1 +MAX_HISTOGRAM_STEP_SIZE_PER_TAG = 50 +MAX_TENSOR_STEP_SIZE_PER_TAG = 20 +MAX_TENSOR_RESPONSE_DATA_SIZE = 100000 diff --git a/mindconverter/mindconverter/conf/defaults.py b/mindconverter/mindconverter/conf/defaults.py new file mode 100644 index 0000000000000000000000000000000000000000..b554869ebc42ddc9136213a6a9359b946469ee31 --- /dev/null +++ b/mindconverter/mindconverter/conf/defaults.py @@ -0,0 +1,33 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Defaults module for mindinsight settings.""" +import os + +#################################### +# Global default settings. 
+#################################### +WORKSPACE = os.path.join(os.environ['HOME'], 'mindinsight') + +#################################### +# Web default settings. +#################################### +PORT = 8080 +URL_PATH_PREFIX = '' + +#################################### +# Datavisual default settings. +#################################### +RELOAD_INTERVAL = 3 # Seconds +SUMMARY_BASE_DIR = os.getcwd() diff --git a/mindconverter/mindconverter/config.py b/mindconverter/mindconverter/config.py new file mode 100644 index 0000000000000000000000000000000000000000..bb189d37130f1db7f721e934c9fcfc9d2f96bfae --- /dev/null +++ b/mindconverter/mindconverter/config.py @@ -0,0 +1,479 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless REQUIRED by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
REQUIRED = 'REQUIRED'
UNREQUIRED = 'UNREQUIRED'
FUNC_MODULE = 'mindconverter.funcs'


class APIPt:
    """Base API for args parse, and API for one frame (PyTorch)."""

    def __init__(self, name: str, params: dict):
        # `name` is the op name, `params` maps formal parameter names to
        # default values; defaults are normalized to their source-text form.
        self.name = name
        self.params = OrderedDict()

        for k, value in params.items():
            self.params[k] = self.to_str(value)

    @staticmethod
    def to_str(value):
        """
        Trans value to str.

        Args:
            value (Union[str,Number,int]): The value to convert.

        Returns:
            str, str type of value.
        """
        # The REQUIRED sentinel is kept verbatim so callers can detect
        # parameters without a default (identity check in create_args).
        if value is REQUIRED:
            return value
        if isinstance(value, str):
            return "'{}'".format(value)
        return str(value)

    def parse_args(self, call_name: str, args_str: str):
        """
        Parse call_name and args_str.

        Args:
            call_name (str): str of the call function, etc.
            args_str (str): str of args for function, which starts with '(' and ends with ')'.

        Returns:
            OrderedDict, all args parsed (plus a 'call_name' entry).

        Raises:
            ValueError: If can not use ast to parse or the required parse node not type of ast.Call,
                or the given args_str not valid.
            CodeSyntaxError: If args_str is not syntactically valid Python.
        """
        # Normalize first so surrounding spaces never defeat the "(...)"
        # check: the original tested the UNstripped first char but the
        # stripped last char, rejecting inputs like "  (x)" despite the
        # "without considering spaces" promise in its error message.
        args_str = args_str.strip()
        if not (len(args_str) >= 2 and args_str.startswith("(") and args_str.endswith(")")):
            raise ValueError('"{}" is expected to be an args string; it should start with "(" and end with ")" '
                             'without considering spaces'.format(args_str))
        try:
            # Prepend a dummy callee so the args parse as a real call.
            ast_node = ast.parse("whatever_call_name" + args_str)
            call_node = ast_node.body[0].value
        except SyntaxError as parse_error:
            raise CodeSyntaxError("can't parse code:\n{}".format(args_str)) from parse_error

        # An API declared with a single *param swallows the whole actual
        # argument list as one string.
        if len(self.params) == 1:
            k = next(iter(self.params))
            if k.startswith('*'):
                value = args_str[1:-1]
                return OrderedDict([(k, value), ("call_name", call_name)])

        args = OrderedDict()

        # Positional arguments: pair them with formal names in declaration order.
        param_iter = iter(self.params.keys())
        if len(call_node.args) > len(self.params):
            raise ValueError('Parse args of torch in {}, but there is problems with params'.format(call_name))
        for arg in call_node.args:
            if isinstance(arg, ast.Starred):
                logger.debug("Find *%s", arg.value.id)
                args['*'] = arg.value.id
            else:
                # pasta.dump may append a trailing newline; strip it.
                args[next(param_iter)] = pasta.dump(arg).strip()

        # Keyword arguments; a None arg means **kwargs unpacking.
        for keyword in call_node.keywords:
            if keyword.arg is None:
                logger.info("Find **%s", keyword.value.id)
                args['**'] = keyword.value.id
            else:
                args[keyword.arg] = pasta.dump(keyword.value).strip()

        args["call_name"] = call_name
        return args
class MappingHelper:
    """Mapping from one frame (PyTorch) to another frame (MindSpore)."""

    def __init__(self, ms_api: 'APIMs', pt_api: 'APIPt', **kwargs):
        # Optional keys: ms2pt_mapping (param-name map), gen_explicit_map
        # (callable producing forced params), export_key (whether generated
        # calls use keyword syntax).
        ms2pt_mapping = kwargs.get('ms2pt_mapping')
        gen_explicit_map = kwargs.get('gen_explicit_map')
        export_key = kwargs.get('export_key')

        if ms2pt_mapping is None:
            ms2pt_mapping = {}
        if gen_explicit_map is None:
            gen_explicit_map = lambda params_pt, args_pt: {}
        self.ms_api = ms_api
        self.pt_api = pt_api
        self.ms2pt_mapping = ms2pt_mapping
        self.gen_explicit_map = gen_explicit_map
        if export_key is not None:
            self.export_key = export_key
        else:
            # Primitive ops pass values positionally by default.
            self.export_key = not ms_api.is_primitive

    def gen_args_expr(self, args):
        """
        Generate str assignment statement from given dict.

        Args:
            args (OrderedDict): Key, value pairs for assignment source.

        Returns:
            str, generated str.
        """
        expr = ''
        for key, value in args.items():
            if expr:
                expr += ', '
            # '*'/'**' keys are spliced without '=' (argument unpacking).
            sym = '' if key in ('*', '**') else '='
            if self.export_key:
                expr += key + sym
            expr += value
        return expr

    def gen_args_expr_for_p(self, args, p_attrs):
        """
        Generate str assignment statement from given dict for primitive and not primitive.

        Args:
            args (OrderedDict): Key, value pairs for assignment source.
            p_attrs (set): Exclusive params for operator.

        Returns:
            tuple, generated str for primitive, generated str for not primitive.
        """
        args_attrs = OrderedDict([(k, v) for k, v in args.items() if k in p_attrs])
        args_ios = OrderedDict([(k, v) for k, v in args.items() if k not in p_attrs])
        return self.gen_args_expr(args_attrs), self.gen_args_expr(args_ios)

    def convert(self, call_name_pt: str, args_str_pt: str):
        """
        Convert code sentence to MindSpore code sentence.

        Args:
            call_name_pt (str): str of the call function, etc.
            args_str_pt (str): str of args for function, which starts with '(' and end with ')'.

        Returns:
            str, converted code sentence for MindSpore.
        """
        # All values in args_pt are source-text strings.
        args_pt = self.pt_api.parse_args(call_name_pt, args_str_pt)
        explicit_map = self.gen_explicit_map(self.pt_api.params, args_pt)
        args_ms = self.ms_api.create_args(self.pt_api.params, args_pt, self.ms2pt_mapping, explicit_map)
        pt_param_input = list(args_pt.values())[0]
        pt_param_alias = list(args_pt.keys())[0]
        # parse_args always records 'call_name'; fall back to the caller's
        # name rather than risking an UnboundLocalError if it were absent
        # (the original only assigned pt_call_name conditionally).
        pt_call_name = args_pt.get("call_name", call_name_pt)

        if self.ms_api.is_primitive:
            if self.pt_api.name == '.size' and 'idx' in args_pt:
                args_expr = self.gen_args_expr(args_ms)
                expr_ms = "%s()(%s)[%s]" % (self.ms_api.name, args_expr, args_pt['idx'])
            else:
                expr_attrs, expr_ios = self.gen_args_expr_for_p(args_ms, self.ms_api.p_attrs)
                expr_ms = "%s(%s)(%s)" % (self.ms_api.name, expr_attrs, expr_ios)
        elif not self.ms2pt_mapping:
            if self.ms_api.name.startswith("."):
                # Method-style MindSpore API: keep the original receiver.
                expr_ms = "%s%s(%s)" % (pt_call_name, self.ms_api.name, pt_param_input)
            elif pt_param_alias == "inplace":
                # 'inplace' has no MindSpore counterpart; drop the argument.
                expr_ms = "%s()" % (self.ms_api.name,)
            else:
                ms_expr = self.gen_args_expr(args_ms)
                expr_ms = "%s(%s)" % (self.ms_api.name, ms_expr)
        else:
            ms_expr = self.gen_args_expr(args_ms)
            expr_ms = "%s(%s)" % (self.ms_api.name, ms_expr)

        return expr_ms
+ """ + ms_name = ms_api_info[0] + ms_params = ms_api_info[1] if len(ms_api_info) >= 2 else None + ms_p_attrs = set(ms_api_info[2]) if len(ms_api_info) >= 3 else None + ms_api = APIMs(name=ms_name, params=ms_params, p_attrs=ms_p_attrs) + return ms_api + + +def get_pt_api(pt_api_info): + """ + Get APIPt instance from pt_api_info. + + Args: + pt_api_info (list): info for create an APIMs instance, the first value in list is name for APIPt, the second(if + provided) is params for APIPt. + + Returns: + APIMs, instance of APIMs parsed from given info. + """ + pt_name = pt_api_info[0] + pt_params = pt_api_info[1] if len(pt_api_info) >= 2 else None + pt_api = APIPt(name=pt_name, params=pt_params) + return pt_api + + +def get_mapping_from_file(path): + """ + Parse mapping info from given file. + + Args: + path (str): The file path. + + Returns: + dict, key is op name, value is a relevant instance of MappingHelper. + """ + mapping_info_d = load_json_file(path) + parse_mapping_dict = {} + for key, value in mapping_info_d.items(): + ms_api_info = value.pop('ms_api') + ms_api = get_ms_api(ms_api_info) + pt_api_info = value.pop('pt_api') + pt_api = get_pt_api(pt_api_info) + gen_explicit_map = value.get('gen_explicit_map') + if gen_explicit_map: + module_name = import_module(FUNC_MODULE) + value['gen_explicit_map'] = getattr(module_name, gen_explicit_map) + + parse_mapping_dict.update({key: MappingHelper(**dict(ms_api=ms_api, pt_api=pt_api), **value)}) + return parse_mapping_dict + + +def load_json_file(file_path): + """ + Load data from given json file path. + + Args: + file_path (str): The file to load json data from. + + Returns: + list(str), the list data stored in file_path. + """ + with open(file_path, 'r', encoding='utf-8') as file: + info = json.loads(file.read()) + return info + + +def get_corresponding_ms_name(pt_name): + """ + Get corresponding MindSpore op name for PyTorch name according to the mappings in mindconverter. 
def get_prompt_info(pt_name):
    """
    Get prompt info for PyTorch op name.

    Args:
        pt_name: PyTorch op name; both the shortened form and the full name work.

    Returns:
        str, prompt info on the op, or None when no prompt info exists for it.
    """
    # Supported-op infos take precedence for shared keys, exactly as in the
    # merged-dict lookup where SUPPORTED_WARN_INFOS was unpacked last.
    if pt_name in SUPPORTED_WARN_INFOS:
        return SUPPORTED_WARN_INFOS[pt_name]
    return UNSUPPORTED_WARN_INFOS.get(pt_name)
+ k[len('F.'):]: v for k, v in F_MAPPING.items()} +F_MAPPING.update(NN_FUNCTIONAL_D) +F_MAPPING.update(TORCH_NN_FUNCTIONAL_D) + +TORCH_DOT_MAPPING_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__), 'mappings/torch_dot_mappings.json')) +TORCH_DOT_MAPPING = get_mapping_from_file(TORCH_DOT_MAPPING_PATH) + +TENSOR_DOT_MAPPING_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__), 'mappings/tensor_dot_mappings.json')) +TENSOR_DOT_MAPPING = get_mapping_from_file(TENSOR_DOT_MAPPING_PATH) + +TORCH_OPTIM_MAPPING_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__), 'mappings/torch_optim_mappings.json')) +TORCH_OPTIM_MAPPING = get_mapping_from_file(TORCH_OPTIM_MAPPING_PATH) +TORCH_OPTIM_MAPPING.update({"torch." + k : v for k, v in TORCH_OPTIM_MAPPING.items()}) + +ALL_MAPPING = {**NN_MAPPING, **F_MAPPING, **TORCH_DOT_MAPPING, **TENSOR_DOT_MAPPING, **TORCH_OPTIM_MAPPING} + +# ---------------------------- api list support or not support ---------------------------- +NN_LIST_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__), 'ops', 'nn_list.json')) +NN_LIST = load_json_file(NN_LIST_PATH) +NN_LIST += ["torch." + name for name in NN_LIST] +NN_SUPPORTED = [x for x in NN_LIST if x in ALL_MAPPING] +NN_UNSUPPORTED = [x for x in NN_LIST if x not in ALL_MAPPING] + +F_LIST_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__), 'ops', 'f_list.json')) +F_LIST = load_json_file(F_LIST_PATH) +F_LIST += ["F." 
+ name[len("torch.nn.functional."):] for name in F_LIST] + \ + [name[len("torch."):] for name in F_LIST] +F_SUPPORTED = [x for x in F_LIST if x in ALL_MAPPING] +F_UNSUPPORTED = [x for x in F_LIST if x not in ALL_MAPPING] + +TORCH_DOT_LIST_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__), 'ops', 'torch_dot_list.json')) +TORCH_DOT_LIST = load_json_file(TORCH_DOT_LIST_PATH) + +TORCH_DOT_SUPPORTED = [x for x in TORCH_DOT_LIST if x in ALL_MAPPING] +TORCH_DOT_UNSUPPORTED = [x for x in TORCH_DOT_LIST if x not in ALL_MAPPING] + +TENSOR_DOT_LIST_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__), 'ops', 'tensor_dot_list.json')) +TENSOR_DOT_LIST = load_json_file(TENSOR_DOT_LIST_PATH) + +TENSOR_DOT_SUPPORTED = [x for x in TENSOR_DOT_LIST if x in ALL_MAPPING] +TENSOR_DOT_UNSUPPORTED = [x for x in TENSOR_DOT_LIST if x not in ALL_MAPPING] + +TORCH_OPTIM_LIST_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__), 'ops', 'torch_optim_list.json')) +TORCH_OPTIM_LIST = load_json_file(TORCH_OPTIM_LIST_PATH) +TORCH_OPTIM_LIST += ["torch." 
+ name for name in TORCH_OPTIM_LIST] +TORCH_OPTIM_SUPPORTED = [x for x in TORCH_OPTIM_LIST if x in ALL_MAPPING] +TORCH_OPTIM_UNSUPPORTED = [x for x in TORCH_OPTIM_LIST if x not in ALL_MAPPING] + +ALL_2P_LIST = F_LIST + TORCH_DOT_LIST + TENSOR_DOT_LIST + TORCH_OPTIM_LIST +ALL_TORCH_APIS = NN_LIST + F_LIST + TORCH_DOT_LIST + TENSOR_DOT_LIST + TORCH_OPTIM_LIST +ALL_SUPPORTED = NN_SUPPORTED + F_SUPPORTED + TORCH_DOT_SUPPORTED + TENSOR_DOT_SUPPORTED + TORCH_OPTIM_SUPPORTED +ALL_UNSUPPORTED = NN_UNSUPPORTED + F_UNSUPPORTED + TORCH_DOT_UNSUPPORTED + TENSOR_DOT_UNSUPPORTED + TORCH_OPTIM_UNSUPPORTED + +UNSUPPORTED_WARN_INFOS_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__), 'warn_info', 'unsupported_warn_infos.json')) +UNSUPPORTED_WARN_INFOS = load_json_file(UNSUPPORTED_WARN_INFOS_PATH) + +NN_UNSUPPORTED_INFOS = {k: v for k, v in UNSUPPORTED_WARN_INFOS.items() if k.startswith('nn.')} +TORCH_NN_UNSUPPORTED_INFOS = {('torch.' + k): v for k, v in NN_UNSUPPORTED_INFOS.items()} + +F_UNSUPPORTED_INFOS = {k: v for k, v in UNSUPPORTED_WARN_INFOS.items() if k.startswith('F.')} +NN_FUNCTIONAL_UNSUPPORTED_INFOS = {'nn.functional.' + k[len('F.'):]: v for k, v in F_UNSUPPORTED_INFOS.items()} +TORCH_NN_FUNCTIONAL_UNSUPPORTED_INFOS = {'torch.nn.functional.' + k[len('F.'):]: v for k, v in + F_UNSUPPORTED_INFOS.items()} + +UNSUPPORTED_WARN_INFOS.update(TORCH_NN_UNSUPPORTED_INFOS) +UNSUPPORTED_WARN_INFOS.update(NN_FUNCTIONAL_UNSUPPORTED_INFOS) +UNSUPPORTED_WARN_INFOS.update(TORCH_NN_FUNCTIONAL_UNSUPPORTED_INFOS) + +SUPPORTED_WARN_INFOS_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__), 'warn_info', 'supported_warn_infos.json')) +SUPPORTED_WARN_INFOS = load_json_file(SUPPORTED_WARN_INFOS_PATH) + +NN_SUPPORTED_INFOS = {k: v for k, v in SUPPORTED_WARN_INFOS.items() if k.startswith('nn.')} +TORCH_NN_SUPPORTED_INFOS = {('torch.' 
class Converter:
    """Convert a PyTorch script file into MindSpore code and write a report."""

    # O_EXCL: creation fails if the destination file already exists,
    # so an existing output is never silently overwritten.
    flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
    # Created files are readable/writable by the owner only.
    modes = stat.S_IWUSR | stat.S_IRUSR

    def __init__(self):
        self._tree = None           # parsed AST of the script being converted
        self._infile = None         # path of the script being converted
        self._code_analyzer = None  # NOTE(review): never assigned elsewhere in this class
        self._ast_editor = None     # AstEditVisitor doing the actual rewriting
        self._report = []           # lines of the human-readable conversion report

    def convert(self, infile, output_dir, report_dir):
        """
        Convert a module's code; converted code will be saved in output_dir, and a report will be saved in report_dir.

        Args:
            infile (str): The script to convert.
            output_dir (str): The path to save converted file.
            report_dir (str): The path to save report file.
        """
        # Derive a dotted module name from the path components (used for logging).
        in_file_split = _path_split(infile)
        in_file_split[-1], _ = _get_name_ext(in_file_split[-1])
        module_name = '.'.join(in_file_split)
        with open(infile, 'r') as file:
            content = ''.join(file.readlines())

        self._infile = infile
        # pasta parses into an AST that can later be dumped back to source text.
        self._tree = pasta.parse(content)
        self._report.clear()
        try:
            logger.info("待转换的脚本文件为 %s", infile)
            logger.info("开始转换 %s", module_name)
            self._report.append('[开始转换]')
            self._ast_editor = AstEditVisitor()
            self._ast_editor.process(self._tree)
            self._report.extend(self._ast_editor.get_logs())
            self._report.append('[转换完毕]')
            dest_file = os.path.join(output_dir, os.path.basename(infile))
            # NOTE(review): flags include O_EXCL, so os.open raises if the
            # destination already exists — confirm this is the intended behavior.
            with os.fdopen(os.open(dest_file, self.flags, self.modes), 'w') as file:
                script = pasta.dump(self._tree)
                script = adjust_mindspore_import_position(script)
                file.write(script)
            logger.info("转换操作已完成,转换结果已被写入到 %s.", dest_file)
        except ScriptNotSupport as error:
            # Unsupported script: record the reason in the report, then re-raise.
            self._report.append('[不支持脚本] ' + error.message)
            self._report.append('[转换失败]')
            raise error
        except Exception as error:
            # Unexpected failure: drop the partial report so the `finally`
            # block does not persist a misleading file.
            self._report.clear()
            raise error
        finally:
            # Persist whatever report was produced, on success or failure.
            if self._report:
                dest_report_file = os.path.join(report_dir,
                                                '_'.join(os.path.basename(infile).split('.')[:-1]) + '_report.txt')
                with os.fdopen(os.open(dest_report_file, self.flags, self.modes), 'a') as file:
                    file.write('\n'.join(self._report))
                logger.info("请注意查看转换报告然后进行手动调试,转换报告已被保存到 %s", dest_report_file)

    @staticmethod
    def convert_api(source_code):
        """
        Convert api_name in code to MindSpore api; if api_name is a python api, code will not convert.

        Args:
            source_code (ast.Call): The ast node to convert.
        Returns:
            str, the converted code.
        """
        ast_node = pasta.parse(source_code).body[0].value
        # Convert without checking the surrounding context.
        check_context = False
        replaced_code = AstEditVisitor().mapping_api(ast_node, check_context)
        return replaced_code
+ + Returns: + int, the start line number. + """ + stack = [] + index = 0 + for i, line in enumerate(source_lines): + line_strip = line.strip() + if line_strip.startswith('#'): + continue + if line_strip.startswith('"""'): + if not line_strip.endswith('"""'): + stack.append('"""') + continue + if line_strip.startswith("'''"): + if not line_strip.endswith("'''"): + stack.append("'''") + continue + if line_strip.endswith('"""') or line_strip.endswith("'''"): + stack.pop() + continue + if line_strip != '' and not stack: + index = i + break + return index + + +def adjust_mindspore_import_position(script): + """ + Adjust code sentence `import mindspore` in script to a proper position if the sentence is set before a comment. + + Args: + script (str): code script before adjust. + + Returns: + str, code script adjusted. + """ + script_list = script.split('\n') + import_ms_sentence = 'import mindspore' + if import_ms_sentence in script_list: + import_index = script_list.index(import_ms_sentence) + if script_list[import_index + 1].startswith('"""') or script_list[import_index + 1].startswith("'''"): + script_list.pop(import_index) + new_index = get_code_start_line_num(script_list) + script_list.insert(new_index, import_ms_sentence) + script = '\n'.join(script_list) + return script + + +def _get_name_ext(file): + """ + Split a file name in name and extension. + + Args: + file (str): Full file path. + + Returns: + tuple (str, str), name and extension. + """ + _, name = os.path.split(file) + return os.path.splitext(name) + + +def _path_split(file): + """ + Split a path in head and tail. + + Args: + file (str): The file path. + + Returns: + list[str], list of file tail + """ + file_dir, name = os.path.split(file) + if file_dir: + sep = file[len(file_dir)-1] + if file_dir.startswith(sep): + return file.split(sep)[1:] + + return file.split(sep) + return [name] + + +def main(files_config): + """ + The entrance for converter, script files will be converted. 
class ForwardCall(ast.NodeVisitor):
    """
    AST visitor that processes forward calls.

    Find the sub functions called by the forward function in the script file.
    """

    def __init__(self, ast_tree):
        self._tree = ast_tree
        self._name_stack = []     # enclosing class names while traversing
        self._forward_stack = []  # function names currently in the forward chain
        self.calls = {}  # key is function name, value is forward function ast node.
        self._function_list = {}  # key is function name, value is function ast node.
        self.process()

    def process(self):
        """Visit the AST tree twice to find out the forward functions."""
        # First pass registers every function definition in _function_list.
        self.visit(self._tree)
        # Restore all traversal state except _function_list, so the second
        # pass can resolve calls to functions defined later in the file.
        self._name_stack.clear()
        self._forward_stack.clear()
        self.calls.clear()
        self.visit(self._tree)

    def get_current_namespace(self):
        """Get the namespace when visiting the AST node."""
        return '.'.join(self._name_stack)

    @classmethod
    def get_call_name(cls, node):
        """Get the full dotted source text of a call's callee, or None for non-calls."""
        if not isinstance(node, ast.Call):
            return None
        return pasta.dump(node.func)

    def visit_ClassDef(self, node):
        """Callback function when visiting a class definition."""
        self._name_stack.append(node.name)
        self.generic_visit(node)
        self._name_stack.pop()

    def visit_FunctionDef(self, node):
        """Callback function when visiting a function definition."""
        namespace = self.get_current_namespace()
        # Bug fix: the original computed this conditional name and then
        # unconditionally rebuilt it as f'{namespace}.{node.name}', so a
        # top-level `forward` was keyed '.forward' instead of 'forward'.
        if namespace:
            func_name = f'{namespace}.{node.name}'
        else:
            func_name = node.name
        is_in_chain = func_name in self.calls or node.name == 'forward'
        if is_in_chain:
            self._forward_stack.append(func_name)

        if node.name == 'forward':
            self.calls.update({func_name: node})

        self._function_list.update({func_name: node})
        self.generic_visit(node)

        if is_in_chain:
            self._forward_stack.pop()

    def visit_Call(self, node):
        """Callback function when visiting a call expression."""
        # Visit arguments first so nested calls are also recorded.
        for arg in node.args:
            self.visit(arg)
        for keyword in node.keywords:
            self.visit(keyword.value)
        func_name = self.get_call_name(node)
        if isinstance(node.func, ast.Name):
            if func_name not in ['super', 'str', 'repr']:
                if self._forward_stack:
                    self.calls.update({func_name: self._function_list.get(func_name)})
                self.visit(node.func)
        else:
            if self._forward_stack:
                if func_name.startswith('self.'):
                    # Resolve self.method to its class-qualified name.
                    whole_name = f'{self.get_current_namespace()}.{func_name.split(".")[-1]}'
                    self.calls.update({whole_name: self._function_list.get(whole_name)})
                else:
                    self.calls.update({func_name: self._function_list.get(func_name)})
            self.visit(node.func)
+ """ + args = args_pt['*args'] + return {"*args": "[{}]".format(args)} + + +def gen_explicit_map_one_delta(params_pt, args_pt, k_ms, k_pt): + """ + Generate explicit_map for which include mapping relationship is `1 - k_ms = k_pt`. + + Args: + params_pt (dict): Params for APIPt. + args_pt (dict): Args for APIPt. + + Returns: + dict, map between frames. + """ + value = args_pt[k_pt] if k_pt in args_pt else params_pt[k_pt] + value = value.strip() + + def is_number(string): + try: + float(string) + return True + except ValueError: + return False + + if is_number(value): + return {k_ms: str(1 - float(value))} + return {k_ms: "1.0 - " + value} + + +def gen_explicit_map_nn_maxpool2d(params_pt, args_pt): + """ + Generate explicit_map for nn.MaxPool2d. + + Args: + params_pt (dict): Params for APIPt. + args_pt (dict): Args for APIPt. + + Returns: + dict, map between frames. + """ + if 'padding' in args_pt: + padding = args_pt['padding'] + else: + padding = params_pt['padding'] + if padding.strip() in ("0", "(0,0)", "(0, 0)"): + pad_mode = "'valid'" + else: + pad_mode = "'same'" + + if 'stride' in args_pt: + stride = args_pt['stride'] + else: + stride = args_pt['kernel_size'] + + return {"pad_mode": pad_mode, + "stride": stride} + + +def torch_dot_eye_gen_explicit_map(_, args_pt): + """ + Generate explicit_map for torch.eye. + + Args: + args_pt (dict): Args for APIPt. + + Returns: + dict, map between frames. 
+ """ + explicit_map = {'t': 'mindspore.int32'} + if args_pt.get('m'): + explicit_map.update({'m': args_pt.get('m')}) + else: + explicit_map.update({'m': args_pt.get('n')}) + return explicit_map + +tensor_dot_permute_gen_explicit_map = lambda params_pt, args_pt: {"input_perm": "(" + args_pt["*dIms"] + ",)"} +tensor_dot_repeat_gen_explicit_map = lambda params_pt, args_pt: {"multiples": "(" + args_pt["*sizes"] + ",)"} +tensor_dot_reshape_gen_explicit_map = lambda params_pt, args_pt: {"shape": "(" + args_pt["*shape"] + ",)"} +torch_dot_zeros_gen_explicit_map = lambda params_pt, args_pt: {"size": "(" + args_pt["*size"] + ",)"} +tensor_dot_view_gen_explicit_map = lambda params_pt, args_pt: {"shape": "(" + args_pt["*shape"] + ",)"} +nn_conv2d_gen_explicit_map = lambda params_pt, args_pt: {"pad_mode": "'pad'"} +nn_batchnorm2d_gen_explicit_map = partial(gen_explicit_map_one_delta, k_ms="momentum", k_pt="momentum") +nn_batchnorm1d_gen_explicit_map = nn_batchnorm2d_gen_explicit_map +nn_dropout_gen_explicit_map = partial(gen_explicit_map_one_delta, k_ms="keep_prob", k_pt="p") +torch_dot_add_gen_explicit_map = lambda params_pt, args_pt:\ + {"input_y": (args_pt['value'] + '*' + args_pt["alpha"]) if args_pt.get("alpha") else args_pt['value']} diff --git a/mindconverter/mindconverter/map_api_download.py b/mindconverter/mindconverter/map_api_download.py new file mode 100644 index 0000000000000000000000000000000000000000..f5311de7b89f1385655ccc2cb74f08a7a46a409e --- /dev/null +++ b/mindconverter/mindconverter/map_api_download.py @@ -0,0 +1,273 @@ +import requests +import re +import json +from bs4 import BeautifulSoup +from fuzzywuzzy import fuzz +#获取算子字典 +def get_ops_dict(version='master', pt_filter=None, ms_filter=None): + url = f"https://gitee.com/mindspore/docs/raw/{version}/docs/mindspore/source_zh_cn/note/api_mapping/pytorch_api_mapping.md" + response = requests.get(url) + content = response.content.decode("utf-8") + + if not pt_filter: + pt_filter = lambda x: x + if not 
def get_api_type(type="torch"):
    """
    Filter the PyTorch-to-MindSpore op mapping down to one API category.

    Args:
        type (str): One of "torch", "nn", "nn.functional", "tensor", "optim".

    Returns:
        dict, PyTorch op name (possibly with its prefix stripped) to the
        corresponding MindSpore op name.
    """
    selected = {}
    for pt_name, ms_name in get_ops_dict().items():
        # Only keep MindSpore targets under ops/nn/Tensor/experimental.
        if ms_name.split(".")[1] not in ("ops", "nn", "Tensor", "experimental"):
            continue
        parts = pt_name.split(".")
        if type == "torch":
            excluded = ("nn", "Tensor", "distributions", "distributed", "optim")
            if parts[0] == "torch" and parts[1] not in excluded:
                selected[pt_name] = ms_name
        elif type == "nn":
            if parts[1] == "nn" and parts[2] != "functional":
                # Drop the leading 'torch.' prefix.
                selected[pt_name[6:]] = ms_name
        elif type == "nn.functional":
            if parts[1] == "nn" and parts[2] == "functional":
                selected[pt_name] = ms_name
        elif type == "tensor":
            if parts[0] == "torch" and parts[1] == "Tensor":
                # Drop 'torch.Tensor' / 'mindspore.Tensor' prefixes.
                selected[pt_name[12:]] = ms_name[16:]
        elif type == "optim":
            if parts[0] == "torch" and parts[1] == "optim":
                selected[pt_name[6:]] = ms_name
    return selected
"tensor"): + ops_name_tensor = ops_name + ops_name = "mindspore.Tensor" + ops_name_tensor + ops_api["ms_api"].append(ops_name_tensor) + else: + if(ops_name[10:22] == "experimental"): + ops_api["ms_api"].append(ops_name[23:]) + else: + ops_api["ms_api"].append(ops_name[10:]) + + if(type == "torch" or type == "nn.functional" or ops_name == "ops.clip_by_norm" or ops_name == "ops.clip_by_value"): + url = f"https://www.mindspore.cn/docs/zh-CN/{ms_version}/api_python/ops/{ops_name}.html" + elif(type == "nn"): + url = f"https://www.mindspore.cn/docs/zh-CN/{ms_version}/api_python/nn/{ops_name}.html" + elif(type == "tensor"): + url = f"https://www.mindspore.cn/docs/zh-CN/{ms_version}/api_python/mindspore/Tensor/{ops_name}.html" + elif(type == "optim"): + if(ops_name[10:12] == "nn"): + url = f"https://www.mindspore.cn/docs/zh-CN/{ms_version}/api_python/nn/{ops_name}.html" + else: + url = f"https://www.mindspore.cn/docs/zh-CN/{ms_version}/api_python/experimental/optim/{ops_name}.html" + response = requests.get(url) + soup = BeautifulSoup(response.text, 'html.parser') + text = soup.get_text() + pattern = ops_name +"\((.*)\)" + match = re.findall(pattern, text) + parameters = [] + for i in range(len(match)): + if(match[i] != ''): + parameters = match[i].split(", ") + break + if(parameters == []): + print("【警告】在mindspore文档搜索函数参数失败或者对应文档中函数参数为空,函数名为"+ops_name) + # try: + # parameters = match[0].split(", ") + # except: + # print("【错误】在mindspore文档搜索函数参数失败,函数名为"+ops_name) + # print(f"函数 {ops_name} 的参数列表为:{parameters}") + # print("MS_PARAM") + # print(parameters) + param_dict_ms = {} + for param in parameters: + if(param == "*"): + continue + if(param.find("=") != -1): + default_param = param.split("=") + if(default_param[1] == "False"): + param_dict_ms.update({default_param[0]:False}) + elif(default_param[1] == "True"): + param_dict_ms.update({default_param[0]:True}) + elif(default_param[1] == "None"): + param_dict_ms.update({default_param[0]:None}) + else: + try: + 
param_dict_ms.update({default_param[0]:float(default_param[1])}) + except: + param_dict_ms.update({default_param[0]:default_param[1]}) + else: + param_dict_ms.update({param:IS_REQUIRED}) + ops_api["ms_api"].append(param_dict_ms) + #Pytorch API + ops_name = k + if(type == "nn.functional"): + ops_name_F = "F." + ops_name[20:] + ops_api["pt_api"].append(ops_name_F) + elif(type == "nn"): + ops_name_nn = ops_name + ops_name = "torch." + ops_name_nn + ops_api["pt_api"].append(ops_name_nn) + elif(type == "tensor"): + ops_name_tensor = ops_name + ops_name = "torch.Tensor" + ops_name_tensor + ops_api["pt_api"].append(ops_name_tensor) + elif(type == "optim"): + ops_name_optim = ops_name + ops_name = "torch." + ops_name_optim + ops_api["pt_api"].append(ops_name_optim) + else: + ops_api["pt_api"].append(ops_name) + + url = f"https://pytorch.org/docs/{pt_version}/generated/{ops_name}.html" + response = requests.get(url) + soup = BeautifulSoup(response.text, 'html.parser') + text = soup.get_text() + if(type == "tensor"): + pattern = ops_name_tensor[1:] + "\((.*)\)" + else: + pattern = ops_name + "\((.*)\)" + match = re.findall(pattern, text) + parameters = [] + for i in range(len(match)): + if(match[i] != ''): + parameters = match[i].split(", ") + break + if(parameters == []): + print("【警告】在torch文档搜索函数参数失败或者对应文档中函数参数为空,函数名为"+ops_name) + # try: + # parameters = match[0].split(", ") + # except: + # print("【错误】在torch文档搜索函数参数失败,函数名为"+ops_name) + # print(f"函数 {ops_name} 的参数列表为:{parameters}") + # print("TORCH_PARAM") + # print(parameters) + param_dict_pt = {} + for param in parameters: + if(param == "*"): + continue + if(param.find("=") != -1): + default_param = param.split("=") + if(default_param[1] == "False"): + param_dict_pt.update({default_param[0]:False}) + elif(default_param[1] == "True"): + param_dict_pt.update({default_param[0]:True}) + elif(default_param[1] == "None"): + param_dict_pt.update({default_param[0]:None}) + else: + try: + 
param_dict_pt.update({default_param[0]:float(default_param[1])}) + except: + param_dict_pt.update({default_param[0]:default_param[1]}) + else: + param_dict_pt.update({param:IS_REQUIRED}) + ops_api["pt_api"].append(param_dict_pt) + #生成参数对应表 + print("正在完成MindSpore算子 %s 到Pytorch算子 %s 的参数解析"% (v,k)) + ms_param_list = list(param_dict_ms.keys()) + pt_param_list = list(param_dict_pt.keys()) + ms_param_num = len(ms_param_list) + pt_param_num = len(pt_param_list) + for i in range(ms_param_num): + flag = False + if(ms_param_list[i] == "learning_rate"): + ops_api["ms2pt_mapping"].update({"learning_rate":"lr"}) + continue + for j in range(pt_param_num): + if(ms_param_list[i] == pt_param_list[j]): + ops_api["ms2pt_mapping"].update({ms_param_list[i]:pt_param_list[j]}) + flag = True + if(flag == False and i < pt_param_num): + if(ms_param_list[i][0:1] == "x" or ms_param_list[i][0:5] == "input" or ms_param_list[i][0:6] == "*input" or ms_param_list[i][0:4] == "axis" or ms_param_list[i][0:9] == "multiples"): + ops_api["ms2pt_mapping"].update({ms_param_list[i]:pt_param_list[i]}) + elif(fuzz.partial_ratio(ms_param_list[i],pt_param_list[i]) > 50): + ops_api["ms2pt_mapping"].update({ms_param_list[i]:pt_param_list[i]}) + + if(type == "nn.functional"): + ops_mapping[ops_name_F] = ops_api + elif(type == "nn"): + ops_mapping[ops_name_nn] = ops_api + elif(type == "tensor"): + ops_mapping[ops_name_tensor] = ops_api + elif(type == "optim"): + ops_mapping[ops_name_optim] = ops_api + else: + ops_mapping[ops_name] = ops_api + + + return ret, ops_mapping +#API映射表的更新 +def update_ops_mapping(): + pass + +if __name__ == "__main__": + #torch + torch_list, torch_dot_mappings = get_ops_input(type="torch") + torch_dot_list = list(torch_list.keys()) + with open('./testdir/ops/torch_dot_list.json', 'w', encoding='utf-8') as f: + json.dump(torch_dot_list, f, indent=2) + with open('./testdir/mappings/torch_dot_mappings.json', 'w', encoding='utf-8') as f: + json.dump(torch_dot_mappings, f, indent=2) + + 
#torch.nn + torch_nn_list, nn_mappings = get_ops_input(type="nn") + nn_list = list(torch_nn_list.keys()) + with open('./testdir/ops/nn_list.json', 'w', encoding='utf-8') as f: + json.dump(nn_list, f, indent=2) + with open('./testdir/mappings/nn_mappings.json', 'w', encoding='utf-8') as f: + json.dump(nn_mappings, f, indent=2) + + #torch.nn.functional + nn_f_list, f_mappings = get_ops_input(type="nn.functional") + f_list = list(nn_f_list.keys()) + with open('./testdir/ops/f_list.json', 'w', encoding='utf-8') as f: + json.dump(f_list, f, indent=2) + with open('./testdir/mappings/f_mappings.json', 'w', encoding='utf-8') as f: + json.dump(f_mappings, f, indent=2) + + #torch.tensor + tensor_list, tensor_dot_mappings = get_ops_input(type="tensor") + tensor_dot_list = list(tensor_list.keys()) + with open('./testdir/ops/tensor_dot_list.json', 'w', encoding='utf-8') as f: + json.dump(tensor_dot_list, f, indent=2) + with open('./testdir/mappings/tensor_dot_mappings.json', 'w', encoding='utf-8') as f: + json.dump(tensor_dot_mappings, f, indent=2) + + #torch.optim + torch_optim_list, torch_optim_mappings = get_ops_input(type="optim") + torch_optim_list = list(torch_optim_list.keys()) + with open('./testdir/ops/torch_optim_list.json', 'w', encoding='utf-8') as f: + json.dump(torch_optim_list, f, indent=2) + with open('./testdir/mappings/torch_optim_mappings.json', 'w', encoding='utf-8') as f: + json.dump(torch_optim_mappings, f, indent=2) diff --git a/mindconverter/mindconverter/mappings/f_mappings.json b/mindconverter/mindconverter/mappings/f_mappings.json new file mode 100644 index 0000000000000000000000000000000000000000..58625e433e85cc720db118d16e332960eb667458 --- /dev/null +++ b/mindconverter/mindconverter/mappings/f_mappings.json @@ -0,0 +1,2075 @@ +{ + "F.adaptive_avg_pool1d": { + "ms_api": [ + "ops.adaptive_avg_pool1d", + { + "input": "REQUIRED", + "output_size": "REQUIRED" + } + ], + "pt_api": [ + "F.adaptive_avg_pool1d", + { + "input": "REQUIRED", + "output_size": 
"REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input", + "output_size": "output_size" + }, + "gen_explicit_map": null + }, + "F.adaptive_avg_pool2d": { + "ms_api": [ + "ops.adaptive_avg_pool2d", + { + "input": "REQUIRED", + "output_size": "REQUIRED" + } + ], + "pt_api": [ + "F.adaptive_avg_pool2d", + { + "input": "REQUIRED", + "output_size": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input", + "output_size": "output_size" + }, + "gen_explicit_map": null + }, + "F.adaptive_avg_pool3d": { + "ms_api": [ + "ops.adaptive_avg_pool3d", + { + "input": "REQUIRED", + "output_size": "REQUIRED" + } + ], + "pt_api": [ + "F.adaptive_avg_pool3d", + { + "input": "REQUIRED", + "output_size": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input", + "output_size": "output_size" + }, + "gen_explicit_map": null + }, + "F.adaptive_max_pool1d": { + "ms_api": [ + "ops.adaptive_max_pool1d", + { + "input": "REQUIRED", + "output_size": "REQUIRED" + } + ], + "pt_api": [ + "F.adaptive_max_pool1d", + { + "input": "REQUIRED", + "output_size": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input", + "output_size": "output_size" + }, + "gen_explicit_map": null + }, + "F.adaptive_max_pool2d": { + "ms_api": [ + "ops.adaptive_max_pool2d", + { + "input": "REQUIRED", + "output_size": "REQUIRED" + } + ], + "pt_api": [ + "F.adaptive_max_pool2d", + { + "input": "REQUIRED", + "output_size": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input", + "output_size": "output_size" + }, + "gen_explicit_map": null + }, + "F.affine_grid": { + "ms_api": [ + "ops.affine_grid", + { + "theta": "REQUIRED", + "size": "REQUIRED", + "align_corners": null + } + ], + "pt_api": [ + "F.affine_grid", + { + "theta": "REQUIRED", + "size": "REQUIRED", + "align_corners": false + } + ], + "ms2pt_mapping": { + "theta": "theta", + "size": "size", + "align_corners": "align_corners" + }, + "gen_explicit_map": null + }, + "F.avg_pool1d": { + "ms_api": [ + "ops.avg_pool1d", + { + "input_x": "REQUIRED", + 
"kernel_size": 1, + "stride": 1, + "padding": 0, + "ceil_mode": false, + "count_include_pad": true + } + ], + "pt_api": [ + "F.avg_pool1d", + { + "input": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": null, + "padding": 0, + "ceil_mode": false, + "count_include_pad": true + } + ], + "ms2pt_mapping": { + "input_x": "input", + "kernel_size": "kernel_size", + "stride": "stride", + "padding": "padding", + "ceil_mode": "ceil_mode", + "count_include_pad": "count_include_pad" + }, + "gen_explicit_map": null + }, + "F.avg_pool2d": { + "ms_api": [ + "ops.avg_pool2d", + { + "input_x": "REQUIRED", + "kernel_size": 1, + "stride": 1, + "padding": 0, + "ceil_mode": false, + "count_include_pad": true + } + ], + "pt_api": [ + "F.avg_pool2d", + { + "input": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": null, + "padding": 0, + "ceil_mode": false, + "count_include_pad": true + } + ], + "ms2pt_mapping": { + "input_x": "input", + "kernel_size": "kernel_size", + "stride": "stride", + "padding": "padding", + "ceil_mode": "ceil_mode", + "count_include_pad": "count_include_pad" + }, + "gen_explicit_map": null + }, + "F.avg_pool3d": { + "ms_api": [ + "ops.avg_pool3d", + { + "input_x": "REQUIRED", + "kernel_size": 1, + "stride": 1, + "padding": 0, + "ceil_mode": false, + "count_include_pad": true + } + ], + "pt_api": [ + "F.avg_pool3d", + { + "input": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": null, + "padding": 0, + "ceil_mode": false, + "count_include_pad": true + } + ], + "ms2pt_mapping": { + "input_x": "input", + "kernel_size": "kernel_size", + "stride": "stride", + "padding": "padding", + "ceil_mode": "ceil_mode", + "count_include_pad": "count_include_pad" + }, + "gen_explicit_map": null + }, + "F.batch_norm": { + "ms_api": [ + "ops.batch_norm", + { + "input_x": "REQUIRED", + "running_mean": "REQUIRED", + "running_var": "REQUIRED", + "weight": "REQUIRED", + "bias": "REQUIRED", + "training": false, + "momentum": 0.1, + "eps": 1e-05 + } + ], + "pt_api": [ + 
"F.batch_norm", + { + "input": "REQUIRED", + "running_mean": "REQUIRED", + "running_var": "REQUIRED", + "weight": null, + "bias": null, + "training": false, + "momentum": 0.1, + "eps": 1e-05 + } + ], + "ms2pt_mapping": { + "input_x": "input", + "running_mean": "running_mean", + "running_var": "running_var", + "weight": "weight", + "bias": "bias", + "training": "training", + "momentum": "momentum", + "eps": "eps" + }, + "gen_explicit_map": null + }, + "F.bilinear": { + "ms_api": [ + "ops.bidense", + { + "input1": "REQUIRED", + "input2": "REQUIRED", + "weight": "REQUIRED", + "bias": null + } + ], + "pt_api": [ + "F.bilinear", + { + "input1": "REQUIRED", + "input2": "REQUIRED", + "weight": "REQUIRED", + "bias": null + } + ], + "ms2pt_mapping": { + "input1": "input1", + "input2": "input2", + "weight": "weight", + "bias": "bias" + }, + "gen_explicit_map": null + }, + "F.binary_cross_entropy_with_logits": { + "ms_api": [ + "ops.binary_cross_entropy_with_logits", + { + "logits": "REQUIRED", + "label": "REQUIRED", + "weight": null, + "pos_weight": null, + "reduction": "mean" + } + ], + "pt_api": [ + "F.binary_cross_entropy_with_logits", + { + "input": "REQUIRED", + "target": "REQUIRED", + "weight": null, + "size_average": null, + "reduce": null, + "reduction": "mean", + "pos_weight": null + } + ], + "ms2pt_mapping": { + "logits": "input", + "label": "target", + "weight": "weight", + "pos_weight": "pos_weight", + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "F.celu": { + "ms_api": [ + "ops.celu", + { + "x": "REQUIRED", + "alpha": 1.0 + } + ], + "pt_api": [ + "F.celu", + { + "input": "REQUIRED", + "alpha": 1.0, + "inplace": false + } + ], + "ms2pt_mapping": { + "x": "input", + "alpha": "alpha" + }, + "gen_explicit_map": null + }, + "F.conv1d": { + "ms_api": [ + "ops.conv1d", + { + "input": "REQUIRED", + "weight": "REQUIRED", + "bias": null, + "stride": 1, + "pad_mode": "valid", + "padding": 0, + "dilation": 1, + "groups": 1 + } + ], + "pt_api": [ + 
"F.conv1d", + { + "input": "REQUIRED", + "weight": "REQUIRED", + "bias": null, + "stride": 1, + "padding": 0, + "dilation": 1, + "groups": 1 + } + ], + "ms2pt_mapping": { + "input": "input", + "weight": "weight", + "bias": "bias", + "stride": "stride", + "padding": "padding", + "dilation": "dilation", + "groups": "groups" + }, + "gen_explicit_map": null + }, + "F.conv2d": { + "ms_api": [ + "ops.conv2d", + { + "input": "REQUIRED", + "weight": "REQUIRED", + "bias": null, + "stride": 1, + "pad_mode": "valid", + "padding": 0, + "dilation": 1, + "groups": 1 + } + ], + "pt_api": [ + "F.conv2d", + { + "input": "REQUIRED", + "weight": "REQUIRED", + "bias": null, + "stride": 1, + "padding": 0, + "dilation": 1, + "groups": 1 + } + ], + "ms2pt_mapping": { + "input": "input", + "weight": "weight", + "bias": "bias", + "stride": "stride", + "padding": "padding", + "dilation": "dilation", + "groups": "groups" + }, + "gen_explicit_map": null + }, + "F.conv3d": { + "ms_api": [ + "ops.conv3d", + { + "input": "REQUIRED", + "weight": "REQUIRED", + "bias": null, + "stride": 1, + "pad_mode": "valid", + "padding": 0, + "dilation": 1, + "groups": 1 + } + ], + "pt_api": [ + "F.conv3d", + { + "input": "REQUIRED", + "weight": "REQUIRED", + "bias": null, + "stride": 1, + "padding": 0, + "dilation": 1, + "groups": 1 + } + ], + "ms2pt_mapping": { + "input": "input", + "weight": "weight", + "bias": "bias", + "stride": "stride", + "padding": "padding", + "dilation": "dilation", + "groups": "groups" + }, + "gen_explicit_map": null + }, + "F.cosine_embedding_loss": { + "ms_api": [ + "ops.cosine_embedding_loss", + { + "input1": "REQUIRED", + "input2": "REQUIRED", + "target": "REQUIRED", + "margin": 0.0, + "reduction": "mean" + } + ], + "pt_api": [ + "F.cosine_embedding_loss", + { + "input1": "REQUIRED", + "input2": "REQUIRED", + "target": "REQUIRED", + "margin": 0, + "size_average": null, + "reduce": null, + "reduction": "mean" + } + ], + "ms2pt_mapping": { + "input1": "input1", + "input2": 
"input2", + "target": "target", + "margin": "margin", + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "F.cosine_similarity": { + "ms_api": [ + "ops.cosine_similarity", + { + "x1": "REQUIRED", + "x2": "REQUIRED", + "dim": 1, + "eps": 1e-08 + } + ], + "pt_api": [ + "F.cosine_similarity", + { + "x1": "REQUIRED", + "x2": "REQUIRED", + "dim": 1, + "eps": 1e-08 + } + ], + "ms2pt_mapping": { + "x1": "x1", + "x2": "x2", + "dim": "dim", + "eps": "eps" + }, + "gen_explicit_map": null + }, + "F.cross_entropy": { + "ms_api": [ + "ops.cross_entropy", + { + "input": "REQUIRED", + "target": "REQUIRED", + "weight": null, + "ignore_index": -100, + "reduction": "mean", + "label_smoothing": 0.0 + } + ], + "pt_api": [ + "F.cross_entropy", + { + "input": "REQUIRED", + "target": "REQUIRED", + "weight": null, + "size_average": null, + "ignore_index": -100, + "reduce": null, + "reduction": "mean" + } + ], + "ms2pt_mapping": { + "input": "input", + "target": "target", + "weight": "weight", + "ignore_index": "ignore_index", + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "F.ctc_loss": { + "ms_api": [ + "ops.ctc_loss", + { + "log_probs": "REQUIRED", + "targets": "REQUIRED", + "input_lengths": "REQUIRED", + "target_lengths": "REQUIRED", + "blank": 0, + "reduction": "mean", + "zero_infinity": false + } + ], + "pt_api": [ + "F.ctc_loss", + { + "log_probs": "REQUIRED", + "targets": "REQUIRED", + "input_lengths": "REQUIRED", + "target_lengths": "REQUIRED", + "blank": 0, + "reduction": "mean", + "zero_infinity": false + } + ], + "ms2pt_mapping": { + "log_probs": "log_probs", + "targets": "targets", + "input_lengths": "input_lengths", + "target_lengths": "target_lengths", + "blank": "blank", + "reduction": "reduction", + "zero_infinity": "zero_infinity" + }, + "gen_explicit_map": null + }, + "F.linear": { + "ms_api": [ + "ops.dense", + { + "input": "REQUIRED", + "weight": "REQUIRED", + "bias": null + } + ], + "pt_api": [ + "F.linear", + { + "input": "REQUIRED", + 
"weight": "REQUIRED", + "bias": null + } + ], + "ms2pt_mapping": { + "input": "input", + "weight": "weight", + "bias": "bias" + }, + "gen_explicit_map": null + }, + "F.dropout": { + "ms_api": [ + "ops.dropout", + { + "input": "REQUIRED", + "p": 0.5, + "training": true, + "seed": null + } + ], + "pt_api": [ + "F.dropout", + { + "input": "REQUIRED", + "p": 0.5, + "training": true, + "inplace": false + } + ], + "ms2pt_mapping": { + "input": "input", + "p": "p", + "training": "training" + }, + "gen_explicit_map": null + }, + "F.dropout2d": { + "ms_api": [ + "ops.dropout2d", + { + "input": "REQUIRED", + "p": 0.5, + "training": true + } + ], + "pt_api": [ + "F.dropout2d", + { + "input": "REQUIRED", + "p": 0.5, + "training": true, + "inplace": false + } + ], + "ms2pt_mapping": { + "input": "input", + "p": "p", + "training": "training" + }, + "gen_explicit_map": null + }, + "F.dropout3d": { + "ms_api": [ + "ops.dropout", + { + "input": "REQUIRED", + "p": 0.5, + "training": true + } + ], + "pt_api": [ + "F.dropout", + { + "input": "REQUIRED", + "p": 0.5, + "training": true, + "inplace": false + } + ], + "ms2pt_mapping": { + "input": "input", + "p": "p", + "training": "training" + }, + "gen_explicit_map": null + }, + "F.elu": { + "ms_api": [ + "ops.elu", + { + "input_x": "REQUIRED", + "alpha": 1.0 + } + ], + "pt_api": [ + "F.elu", + { + "input": "REQUIRED", + "alpha": 1.0, + "inplace": false + } + ], + "ms2pt_mapping": { + "input_x": "input", + "alpha": "alpha" + }, + "gen_explicit_map": null + }, + "F.fold": { + "ms_api": [ + "ops.fold", + { + "input": "REQUIRED", + "output_size": "REQUIRED", + "kernel_size": "REQUIRED", + "dilation": 1, + "padding": 0, + "stride": 1 + } + ], + "pt_api": [ + "F.fold", + { + "input": "REQUIRED", + "output_size": "REQUIRED", + "kernel_size": "REQUIRED", + "dilation": 1, + "padding": 0, + "stride": 1 + } + ], + "ms2pt_mapping": { + "input": "input", + "output_size": "output_size", + "kernel_size": "kernel_size", + "dilation": "dilation", + 
"padding": "padding", + "stride": "stride" + }, + "gen_explicit_map": null + }, + "F.gelu": { + "ms_api": [ + "ops.gelu", + { + "input_x": "REQUIRED", + "approximate": null + } + ], + "pt_api": [ + "F.gelu", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input_x": "input" + }, + "gen_explicit_map": null + }, + "F.glu": { + "ms_api": [ + "ops.glu", + { + "x": "REQUIRED", + "axis": -1 + } + ], + "pt_api": [ + "F.glu", + { + "input": "REQUIRED", + "dim": -1 + } + ], + "ms2pt_mapping": { + "x": "input", + "axis": "dim" + }, + "gen_explicit_map": null + }, + "F.grid_sample": { + "ms_api": [ + "ops.grid_sample", + { + "input": "REQUIRED", + "grid": "REQUIRED", + "mode": "bilinear", + "padding_mode": "zero", + "align_corners": false + } + ], + "pt_api": [ + "F.grid_sample", + { + "input": "REQUIRED", + "grid": "REQUIRED", + "mode": "bilinear", + "padding_mode": "zero", + "align_corners": null + } + ], + "ms2pt_mapping": { + "input": "input", + "grid": "grid", + "mode": "mode", + "padding_mode": "padding_mode", + "align_corners": "align_corners" + }, + "gen_explicit_map": null + }, + "F.gumbel_softmax": { + "ms_api": [ + "ops.gumbel_softmax", + { + "logits": "REQUIRED", + "tau": 1, + "hard": false, + "dim": -1 + } + ], + "pt_api": [ + "F.gumbel_softmax", + { + "logits": "REQUIRED", + "tau": 1, + "hard": false, + "eps": 1e-10, + "dim": -1 + } + ], + "ms2pt_mapping": { + "logits": "logits", + "tau": "tau", + "hard": "hard", + "dim": "dim" + }, + "gen_explicit_map": null + }, + "F.hardshrink": { + "ms_api": [ + "ops.hardshrink", + { + "x": "REQUIRED", + "lambd": 0.5 + } + ], + "pt_api": [ + "F.hardshrink", + { + "input": "REQUIRED", + "lambd": 0.5 + } + ], + "ms2pt_mapping": { + "x": "input", + "lambd": "lambd" + }, + "gen_explicit_map": null + }, + "F.hardswish": { + "ms_api": [ + "ops.hardswish", + { + "x": "REQUIRED" + } + ], + "pt_api": [ + "F.hardswish", + { + "input": "REQUIRED", + "inplace": false + } + ], + "ms2pt_mapping": { + "x": "input" + }, + 
"gen_explicit_map": null + }, + "F.hardsigmoid": { + "ms_api": [ + "ops.hardswish", + { + "x": "REQUIRED" + } + ], + "pt_api": [ + "F.hardswish", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "x": "input" + }, + "gen_explicit_map": null + }, + "F.hardtanh": { + "ms_api": [ + "ops.hardtanh", + { + "input": "REQUIRED", + "min_val": -1.0, + "max_val": 1.0 + } + ], + "pt_api": [ + "F.hardtanh", + { + "input": "REQUIRED", + "min_val": -1.0, + "max_val": 1.0, + "inplace": false + } + ], + "ms2pt_mapping": { + "input": "input", + "min_val": "min_val", + "max_val": "max_val" + }, + "gen_explicit_map": null + }, + "F.hinge_embedding_loss": { + "ms_api": [ + "ops.hinge_embedding_loss", + { + "input": "REQUIRED", + "targets": "REQUIRED", + "margin": 1.0, + "reduction": "mean" + } + ], + "pt_api": [ + "F.hinge_embedding_loss", + { + "input": "REQUIRED", + "target": "REQUIRED", + "margin": 1.0, + "size_average": null, + "reduce": null, + "reduction": "mean" + } + ], + "ms2pt_mapping": { + "input": "input", + "targets": "target", + "margin": "margin", + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "F.interpolate": { + "ms_api": [ + "ops.interpolate", + { + "input": "REQUIRED", + "size": null, + "scale_factor": null, + "mode": "nearest", + "align_corners": null, + "recompute_scale_factor": null + } + ], + "pt_api": [ + "F.interpolate", + { + "input": "REQUIRED", + "size": null, + "scale_factor": null, + "mode": "nearest", + "align_corners": null, + "recompute_scale_factor": null + } + ], + "ms2pt_mapping": { + "input": "input", + "size": "size", + "scale_factor": "scale_factor", + "mode": "mode", + "align_corners": "align_corners", + "recompute_scale_factor": "recompute_scale_factor" + }, + "gen_explicit_map": null + }, + "F.kl_div": { + "ms_api": [ + "ops.kl_div", + { + "logits": "REQUIRED", + "labels": "REQUIRED", + "reduction": "mean" + } + ], + "pt_api": [ + "F.kl_div", + { + "input": "REQUIRED", + "target": "REQUIRED", + "size_average": null, + 
"reduce": null, + "reduction": "mean", + "log_target": false + } + ], + "ms2pt_mapping": { + "logits": "input", + "labels": "target", + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "F.l1_loss": { + "ms_api": [ + "ops.l1_loss", + { + "input": "REQUIRED", + "target": "REQUIRED", + "reduction": "mean" + } + ], + "pt_api": [ + "F.l1_loss", + { + "input": "REQUIRED", + "target": "REQUIRED", + "size_average": null, + "reduce": null, + "reduction": "mean" + } + ], + "ms2pt_mapping": { + "input": "input", + "target": "target", + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "F.leaky_relu": { + "ms_api": [ + "ops.leaky_relu", + { + "input": "REQUIRED", + "alpha": 0.2 + } + ], + "pt_api": [ + "F.leaky_relu", + { + "input": "REQUIRED", + "negative_slope": 0.01, + "inplace": false + } + ], + "ms2pt_mapping": { + "input": "input", + "alpha": "negative_slope" + }, + "gen_explicit_map": null + }, + "F.logsigmoid": { + "ms_api": [ + "ops.logsigmoid", + { + "x": "REQUIRED" + } + ], + "pt_api": [ + "F.logsigmoid", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "x": "input" + }, + "gen_explicit_map": null + }, + "F.log_softmax": { + "ms_api": [ + "ops.log_softmax", + { + "logits": "REQUIRED", + "axis": -1 + } + ], + "pt_api": [ + "F.log_softmax", + { + "input": "REQUIRED", + "dim": null, + "_stacklevel": 3, + "dtype": null + } + ], + "ms2pt_mapping": { + "logits": "input", + "axis": "dim" + }, + "gen_explicit_map": null + }, + "F.lp_pool1d": { + "ms_api": [ + "ops.lp_pool1d", + { + "x": "REQUIRED", + "norm_type": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": null, + "ceil_mode": false + } + ], + "pt_api": [ + "F.lp_pool1d", + { + "input": "REQUIRED", + "norm_type": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": null, + "ceil_mode": false + } + ], + "ms2pt_mapping": { + "x": "input", + "norm_type": "norm_type", + "kernel_size": "kernel_size", + "stride": "stride", + "ceil_mode": "ceil_mode" + }, + "gen_explicit_map": null + 
}, + "F.lp_pool2d": { + "ms_api": [ + "ops.lp_pool2d", + { + "x": "REQUIRED", + "norm_type": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": null, + "ceil_mode": false + } + ], + "pt_api": [ + "F.lp_pool2d", + { + "input": "REQUIRED", + "norm_type": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": null, + "ceil_mode": false + } + ], + "ms2pt_mapping": { + "x": "input", + "norm_type": "norm_type", + "kernel_size": "kernel_size", + "stride": "stride", + "ceil_mode": "ceil_mode" + }, + "gen_explicit_map": null + }, + "F.margin_ranking_loss": { + "ms_api": [ + "ops.margin_ranking_loss", + { + "input1": "REQUIRED", + "input2": "REQUIRED", + "target": "REQUIRED", + "margin": 0.0, + "reduction": "mean" + } + ], + "pt_api": [ + "F.margin_ranking_loss", + { + "input1": "REQUIRED", + "input2": "REQUIRED", + "target": "REQUIRED", + "margin": 0.0, + "size_average": null, + "reduce": null, + "reduction": "mean" + } + ], + "ms2pt_mapping": { + "input1": "input1", + "input2": "input2", + "target": "target", + "margin": "margin", + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "F.max_pool3d": { + "ms_api": [ + "ops.max_pool3d", + { + "x": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": null, + "padding": 0, + "dilation": 1, + "ceil_mode": false, + "return_indices": false + } + ], + "pt_api": [ + "F.max_pool3d", + { + "input": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": null, + "padding": 0, + "dilation": 1, + "ceil_mode": false, + "return_indices": false + } + ], + "ms2pt_mapping": { + "x": "input", + "kernel_size": "kernel_size", + "stride": "stride", + "padding": "padding", + "dilation": "dilation", + "ceil_mode": "ceil_mode", + "return_indices": "return_indices" + }, + "gen_explicit_map": null + }, + "F.max_unpool1d": { + "ms_api": [ + "ops.max_unpool1d", + { + "x": "REQUIRED", + "indices": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": null, + "padding": 0, + "output_size": null + } + ], + "pt_api": [ + "F.max_unpool1d", + { + 
"input": "REQUIRED", + "indices": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": null, + "padding": 0, + "output_size": null + } + ], + "ms2pt_mapping": { + "x": "input", + "indices": "indices", + "kernel_size": "kernel_size", + "stride": "stride", + "padding": "padding", + "output_size": "output_size" + }, + "gen_explicit_map": null + }, + "F.max_unpool2d": { + "ms_api": [ + "ops.max_unpool2d", + { + "x": "REQUIRED", + "indices": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": null, + "padding": 0, + "output_size": null + } + ], + "pt_api": [ + "F.max_unpool2d", + { + "input": "REQUIRED", + "indices": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": null, + "padding": 0, + "output_size": null + } + ], + "ms2pt_mapping": { + "x": "input", + "indices": "indices", + "kernel_size": "kernel_size", + "stride": "stride", + "padding": "padding", + "output_size": "output_size" + }, + "gen_explicit_map": null + }, + "F.max_unpool3d": { + "ms_api": [ + "ops.max_unpool3d", + { + "x": "REQUIRED", + "indices": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": null, + "padding": 0, + "output_size": null + } + ], + "pt_api": [ + "F.max_unpool3d", + { + "input": "REQUIRED", + "indices": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": null, + "padding": 0, + "output_size": null + } + ], + "ms2pt_mapping": { + "x": "input", + "indices": "indices", + "kernel_size": "kernel_size", + "stride": "stride", + "padding": "padding", + "output_size": "output_size" + }, + "gen_explicit_map": null + }, + "F.mse_loss": { + "ms_api": [ + "ops.mse_loss", + { + "input": "REQUIRED", + "target": "REQUIRED", + "reduction": "mean" + } + ], + "pt_api": [ + "F.mse_loss", + { + "input": "REQUIRED", + "target": "REQUIRED", + "size_average": "REQUIRED", + "reduce": null, + "reduction": "mean" + } + ], + "ms2pt_mapping": { + "input": "input", + "target": "target", + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "F.multi_margin_loss": { + "ms_api": [ + 
"ops.multi_margin_loss", + { + "input": "REQUIRED", + "target": "REQUIRED", + "p": 1, + "margin": 1, + "weight": null, + "reduction": "mean" + } + ], + "pt_api": [ + "F.multi_margin_loss", + { + "input": "REQUIRED", + "target": "REQUIRED", + "p": 1, + "margin": 1, + "weight": null, + "size_average": null, + "reduce": null, + "reduction": "mean" + } + ], + "ms2pt_mapping": { + "input": "input", + "target": "target", + "p": "p", + "margin": "margin", + "weight": "weight", + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "F.multilabel_margin_loss": { + "ms_api": [ + "ops.multilabel_margin_loss", + { + "input": "REQUIRED", + "target": "REQUIRED", + "reduction": "mean" + } + ], + "pt_api": [ + "F.multilabel_margin_loss", + { + "input": "REQUIRED", + "target": "REQUIRED", + "size_average": null, + "reduce": null, + "reduction": "mean" + } + ], + "ms2pt_mapping": { + "input": "input", + "target": "target", + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "F.multilabel_soft_margin_loss": { + "ms_api": [ + "ops.multilabel_soft_margin_loss", + { + "input": "REQUIRED", + "target": "REQUIRED", + "weight": null, + "reduction": "mean" + } + ], + "pt_api": [ + "F.multilabel_soft_margin_loss", + { + "input": "REQUIRED", + "target": "REQUIRED", + "weight": null, + "size_average": null + } + ], + "ms2pt_mapping": { + "input": "REQUIRED", + "target": "REQUIRED", + "weight": "weight" + }, + "gen_explicit_map": null + }, + "F.nll_loss": { + "ms_api": [ + "ops.nll_loss", + { + "input": "REQUIRED", + "target": "REQUIRED", + "weight": null, + "ignore_index": -100, + "reduction": "mean", + "label_smoothing": 0.0 + } + ], + "pt_api": [ + "F.nll_loss", + { + "input": "REQUIRED", + "target": "REQUIRED", + "weight": null, + "size_average": null, + "ignore_index": -100, + "reduce": null, + "reduction": "mean" + } + ], + "ms2pt_mapping": { + "input": "input", + "target": "target", + "weight": "weight", + "ignore_index": "ignore_index", + "reduction": "reduction" 
+ }, + "gen_explicit_map": null + }, + "F.pad": { + "ms_api": [ + "ops.pad", + { + "input_x": "REQUIRED", + "padding": "REQUIRED", + "mode": "constant", + "value": null + } + ], + "pt_api": [ + "F.pad", + { + "input": "REQUIRED", + "pad": "REQUIRED", + "mode": "constant", + "value": null + } + ], + "ms2pt_mapping": { + "input_x": "input", + "padding": "pad", + "mode": "mode", + "value": "value" + }, + "gen_explicit_map": null + }, + "F.pdist": { + "ms_api": [ + "ops.pdist", + { + "input": "REQUIRED", + "p": 2.0 + } + ], + "pt_api": [ + "F.pdist", + { + "input": "REQUIRED", + "p": 2.0 + } + ], + "ms2pt_mapping": { + "input": "input", + "p": "p" + }, + "gen_explicit_map": null + }, + "F.pixel_shuffle": { + "ms_api": [ + "ops.pixel_shuffle", + { + "input": "REQUIRED", + "upscale_factor": "REQUIRED" + } + ], + "pt_api": [ + "F.pixel_shuffle", + { + "input": "REQUIRED", + "upscale_factor": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input", + "upscale_factor": "upscale_factor" + }, + "gen_explicit_map": null + }, + "F.pixel_unshuffle": { + "ms_api": [ + "ops.pixel_unshuffle", + { + "input": "REQUIRED", + "downscale_factor": "REQUIRED" + } + ], + "pt_api": [ + "F.pixel_unshuffle", + { + "input": "REQUIRED", + "downscale_factor": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input", + "downscale_factor": "downscale_factor" + }, + "gen_explicit_map": null + }, + "F.prelu": { + "ms_api": [ + "ops.prelu", + { + "x": "REQUIRED", + "weight": "REQUIRED" + } + ], + "pt_api": [ + "F.prelu", + { + "input": "REQUIRED", + "weight": "REQUIRED" + } + ], + "ms2pt_mapping": { + "x": "input", + "weight": "weight" + }, + "gen_explicit_map": null + }, + "F.relu": { + "ms_api": [ + "ops.relu", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "F.relu", + { + "input": "REQUIRED", + "inplace": false + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "F.relu6": { + "ms_api": [ + "ops.relu6", + { + "x": "REQUIRED" + } + ], + "pt_api": [ + 
"F.relu6", + { + "input": "REQUIRED", + "inplace": false + } + ], + "ms2pt_mapping": { + "x": "input" + }, + "gen_explicit_map": null + }, + "F.rrelu": { + "ms_api": [ + "ops.rrelu", + { + "input": "REQUIRED", + "lower": "1/8", + "upper": "1/3" + } + ], + "pt_api": [ + "F.rrelu", + { + "input": "REQUIRED", + "lower": "1/8", + "upper": "1/3", + "training": false, + "inplace": false + } + ], + "ms2pt_mapping": { + "input": "input", + "lower": "lower", + "upper": "upper" + }, + "gen_explicit_map": null + }, + "F.selu": { + "ms_api": [ + "ops.selu", + { + "input_x": "REQUIRED" + } + ], + "pt_api": [ + "F.selu", + { + "input": "REQUIRED", + "inplace": false + } + ], + "ms2pt_mapping": { + "input_x": "input" + }, + "gen_explicit_map": null + }, + "F.sigmoid": { + "ms_api": [ + "ops.sigmoid", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "F.sigmoid", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "F.silu": { + "ms_api": [ + "ops.silu", + { + "x": "REQUIRED" + } + ], + "pt_api": [ + "F.silu", + { + "input": "REQUIRED", + "inplace": false + } + ], + "ms2pt_mapping": { + "x": "input" + }, + "gen_explicit_map": null + }, + "F.softmax": { + "ms_api": [ + "ops.softmax", + { + "x": "REQUIRED", + "axis": -1, + "dtype": null + } + ], + "pt_api": [ + "F.softmax", + { + "input": "REQUIRED", + "dim": null, + "_stacklevel": 3, + "dtype": null + } + ], + "ms2pt_mapping": { + "x": "input", + "axis": "dim", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "F.softmin": { + "ms_api": [ + "ops.softmin", + { + "x": "REQUIRED", + "axis": -1, + "dtype": null + } + ], + "pt_api": [ + "F.softmin", + { + "input": "REQUIRED", + "dim": null, + "_stacklevel": 3, + "dtype": null + } + ], + "ms2pt_mapping": { + "x": "input", + "axis": "dim", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "F.softsign": { + "ms_api": [ + "ops.softsign", + { + "x": "REQUIRED" + } + ], + "pt_api": [ + "F.softsign", + { + "input": 
"REQUIRED" + } + ], + "ms2pt_mapping": { + "x": "input" + }, + "gen_explicit_map": null + }, + "F.smooth_l1_loss": { + "ms_api": [ + "ops.smooth_l1_loss", + { + "input": "REQUIRED", + "target": "REQUIRED", + "beta": 1.0, + "reduction": "none" + } + ], + "pt_api": [ + "F.smooth_l1_loss", + { + "input": "REQUIRED", + "target": "REQUIRED", + "size_average": null, + "reduce": null, + "reduction": "mean", + "beta": 1.0 + } + ], + "ms2pt_mapping": { + "input": "input", + "target": "target", + "beta": "beta", + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "F.soft_margin_loss": { + "ms_api": [ + "ops.soft_margin_loss", + { + "input": "REQUIRED", + "target": "REQUIRED", + "reduction": "mean" + } + ], + "pt_api": [ + "F.soft_margin_loss", + { + "input": "REQUIRED", + "target": "REQUIRED", + "size_average": null, + "reduce": null, + "reduction": "mean" + } + ], + "ms2pt_mapping": { + "input": "input", + "target": "target", + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "F.softshrink": { + "ms_api": [ + "ops.softshrink", + { + "x": "REQUIRED", + "lambd": 0.5 + } + ], + "pt_api": [ + "F.softshrink", + { + "input": "REQUIRED", + "lambd": 0.5 + } + ], + "ms2pt_mapping": { + "x": "input", + "lambd": "lambd" + }, + "gen_explicit_map": null + }, + "F.tanh": { + "ms_api": [ + "ops.tanh", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "F.tanh", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "F.tanhshrink": { + "ms_api": [ + "ops.tanhshrink", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "F.tanhshrink", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "F.threshold": { + "ms_api": [ + "ops.threshold", + { + "input": "REQUIRED", + "thr": "REQUIRED", + "value": "REQUIRED" + } + ], + "pt_api": [ + "F.threshold", + { + "input": "REQUIRED", + "threshold": "REQUIRED", + "value": "REQUIRED", + "inplace": false + } + 
], + "ms2pt_mapping": { + "input": "input", + "thr": "threshold", + "value": "value" + }, + "gen_explicit_map": null + }, + "F.triplet_margin_loss": { + "ms_api": [ + "ops.triplet_margin_loss", + { + "anchor": "REQUIRED", + "positive": "REQUIRED", + "negative": "REQUIRED", + "margin": 1.0, + "p": 2, + "eps": 1e-06, + "swap": false, + "reduction": "mean" + } + ], + "pt_api": [ + "F.triplet_margin_loss", + { + "anchor": "REQUIRED", + "positive": "REQUIRED", + "negative": "REQUIRED", + "margin": 1.0, + "p": 2, + "eps": 1e-06, + "swap": false, + "size_average": null, + "reduce": null, + "reduction": "mean" + } + ], + "ms2pt_mapping": { + "anchor": "anchor", + "positive": "positive", + "negative": "negative", + "margin": "margin", + "p": "p", + "eps": "eps", + "swap": "swap", + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "F.unfold": { + "ms_api": [ + "ops.unfold", + { + "input": "REQUIRED", + "kernel_size": "REQUIRED", + "dilation": 1, + "padding": 0, + "stride": 1 + } + ], + "pt_api": [ + "F.unfold", + { + "input": "REQUIRED", + "kernel_size": "REQUIRED", + "dilation": 1, + "padding": 0, + "stride": 1 + } + ], + "ms2pt_mapping": { + "input": "input", + "kernel_size": "kernel_size", + "dilation": "dilation", + "padding": "padding", + "stride": "stride" + }, + "gen_explicit_map": null + }, + "F.upsample": { + "ms_api": [ + "ops.upsample", + { + "input": "REQUIRED", + "size": "REQUIRED", + "scale_factor": null, + "mode": "nearest", + "align_corners": null, + "recompute_scale_factor": null + } + ], + "pt_api": [ + "F.upsample", + { + "input": "REQUIRED", + "size": "REQUIRED", + "scale_factor": null, + "mode": "nearest", + "align_corners": null + } + ], + "ms2pt_mapping": { + "input": "input", + "size": "size", + "scale_factor": "scale_factor", + "mode": "mode", + "align_corners": "align_corners" + }, + "gen_explicit_map": null + }, + "F.max_pool2d": { + "ms_api": [ + "ops.MaxPool", + { + "ksize": 1, + "strides": 1, + "padding": "valid", + "input": 
"REQUIRED" + }, + [ + "ksize", + "strides", + "padding" + ] + ], + "pt_api": [ + "F.max_pool2d", + { + "input": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": null, + "padding": 0, + "dilation": 1, + "ceil_mode": false, + "return_indices": false + } + ], + "ms2pt_mapping": { + "ksize": "kernel_size", + "input": "input" + }, + "gen_explicit_map": "gen_explicit_map_f_max_pool2d" + }, + "F.normalize": { + "ms_api": [ + "ops.L2Normalize", + { + "axis": 0, + "epsilon": 0.0001, + "input_x": "REQUIRED" + }, + [ + "axis", + "epsilon" + ] + ], + "pt_api": [ + "F.normalize", + { + "input": "REQUIRED", + "p": 2, + "dim": 1, + "eps": 1e-12 + } + ], + "ms2pt_mapping": { + "input_x": "input", + "epsilon": "eps", + "axis": "dim" + } + } +} \ No newline at end of file diff --git a/mindconverter/mindconverter/mappings/nn_mappings.json b/mindconverter/mindconverter/mappings/nn_mappings.json new file mode 100644 index 0000000000000000000000000000000000000000..73dc0547c632ccb3c22f259089c71d92a38d8ca0 --- /dev/null +++ b/mindconverter/mindconverter/mappings/nn_mappings.json @@ -0,0 +1,2985 @@ +{ + "nn.AdaptiveAvgPool1d": { + "ms_api": [ + "nn.AdaptiveAvgPool1d", + { + "output_size": "REQUIRED" + } + ], + "pt_api": [ + "nn.AdaptiveAvgPool1d", + { + "output_size": "REQUIRED" + } + ], + "ms2pt_mapping": { + "output_size": "output_size" + }, + "gen_explicit_map": null + }, + "nn.AdaptiveAvgPool2d": { + "ms_api": [ + "nn.AdaptiveAvgPool2d", + { + "output_size": "REQUIRED" + } + ], + "pt_api": [ + "nn.AdaptiveAvgPool2d", + { + "output_size": "REQUIRED" + } + ], + "ms2pt_mapping": { + "output_size": "output_size" + }, + "gen_explicit_map": null + }, + "nn.AdaptiveAvgPool3d": { + "ms_api": [ + "nn.AdaptiveAvgPool3d", + { + "output_size": "REQUIRED" + } + ], + "pt_api": [ + "nn.AdaptiveAvgPool3d", + { + "output_size": "REQUIRED" + } + ], + "ms2pt_mapping": { + "output_size": "output_size" + }, + "gen_explicit_map": null + }, + "nn.AdaptiveMaxPool1d": { + "ms_api": [ + 
"nn.AdaptiveMaxPool1d", + { + "output_size": "REQUIRED" + } + ], + "pt_api": [ + "nn.AdaptiveMaxPool1d", + { + "output_size": "REQUIRED", + "return_indices": false + } + ], + "ms2pt_mapping": { + "output_size": "output_size" + }, + "gen_explicit_map": null + }, + "nn.AdaptiveMaxPool2d": { + "ms_api": [ + "nn.AdaptiveMaxPool2d", + { + "output_size": "REQUIRED", + "return_indices": false + } + ], + "pt_api": [ + "nn.AdaptiveMaxPool2d", + { + "output_size": "REQUIRED", + "return_indices": false + } + ], + "ms2pt_mapping": { + "output_size": "output_size", + "return_indices": "return_indices" + }, + "gen_explicit_map": null + }, + "nn.AdaptiveMaxPool3d": { + "ms_api": [ + "nn.AdaptiveMaxPool3d", + { + "output_size": "REQUIRED", + "return_indices": false + } + ], + "pt_api": [ + "nn.AdaptiveMaxPool3d", + { + "output_size": "REQUIRED", + "return_indices": false + } + ], + "ms2pt_mapping": { + "output_size": "output_size", + "return_indices": "return_indices" + }, + "gen_explicit_map": null + }, + "nn.AvgPool1d": { + "ms_api": [ + "nn.AvgPool1d", + { + "kernel_size": 1.0, + "stride": 1.0, + "pad_mode": "valid", + "padding": 0.0, + "ceil_mode": false, + "count_include_pad": true + } + ], + "pt_api": [ + "nn.AvgPool1d", + { + "kernel_size": "REQUIRED", + "stride": null, + "padding": 0.0, + "ceil_mode": false, + "count_include_pad": true + } + ], + "ms2pt_mapping": { + "kernel_size": "kernel_size", + "stride": "stride", + "pad_mode": "padding", + "padding": "padding", + "ceil_mode": "ceil_mode", + "count_include_pad": "count_include_pad" + }, + "gen_explicit_map": null + }, + "nn.AvgPool2d": { + "ms_api": [ + "nn.AvgPool2d", + { + "kernel_size": 1.0, + "stride": 1.0, + "pad_mode": "valid", + "padding": 0.0, + "ceil_mode": false, + "count_include_pad": true, + "divisor_override": null, + "data_format": "NCHW" + } + ], + "pt_api": [ + "nn.AvgPool2d", + { + "kernel_size": "REQUIRED", + "stride": null, + "padding": 0.0, + "ceil_mode": false, + "count_include_pad": true, + 
"divisor_override": null + } + ], + "ms2pt_mapping": { + "kernel_size": "kernel_size", + "stride": "stride", + "pad_mode": "padding", + "padding": "padding", + "ceil_mode": "ceil_mode", + "count_include_pad": "count_include_pad", + "divisor_override": "divisor_override" + }, + "gen_explicit_map": null + }, + "nn.AvgPool3d": { + "ms_api": [ + "nn.AvgPool3d", + { + "kernel_size": 1.0, + "stride": 1.0, + "pad_mode": "valid", + "padding": 0.0, + "ceil_mode": false, + "count_include_pad": true, + "divisor_override": null + } + ], + "pt_api": [ + "nn.AvgPool3d", + { + "kernel_size": "REQUIRED", + "stride": null, + "padding": 0.0, + "ceil_mode": false, + "count_include_pad": true, + "divisor_override": null + } + ], + "ms2pt_mapping": { + "kernel_size": "kernel_size", + "stride": "stride", + "pad_mode": "padding", + "padding": "padding", + "ceil_mode": "ceil_mode", + "count_include_pad": "count_include_pad", + "divisor_override": "divisor_override" + }, + "gen_explicit_map": null + }, + "nn.BCELoss": { + "ms_api": [ + "nn.BCELoss", + { + "weight": null, + "reduction": "mean" + } + ], + "pt_api": [ + "nn.BCELoss", + { + "weight": null, + "size_average": null, + "reduce": null, + "reduction": "mean" + } + ], + "ms2pt_mapping": { + "weight": "weight", + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "nn.BCEWithLogitsLoss": { + "ms_api": [ + "nn.BCEWithLogitsLoss", + { + "reduction": "mean", + "weight": null, + "pos_weight": null + } + ], + "pt_api": [ + "nn.BCEWithLogitsLoss", + { + "weight": null, + "size_average": null, + "reduce": null, + "reduction": "mean", + "pos_weight": null + } + ], + "ms2pt_mapping": { + "reduction": "reduction", + "weight": "weight", + "pos_weight": "pos_weight" + }, + "gen_explicit_map": null + }, + "nn.BatchNorm1d": { + "ms_api": [ + "nn.BatchNorm1d", + { + "num_features": "REQUIRED", + "eps": 1e-05, + "momentum": 0.9, + "affine": true, + "gamma_init": "ones", + "beta_init": "zeros", + "moving_mean_init": "zeros", + 
"moving_var_init": "ones", + "use_batch_statistics": null, + "data_format": "NCHW", + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.BatchNorm1d", + { + "num_features": "REQUIRED", + "eps": 1e-05, + "momentum": 0.1, + "affine": true, + "track_running_stats": true, + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": { + "num_features": "num_features", + "eps": "eps", + "momentum": "momentum", + "affine": "affine", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "nn.BatchNorm2d": { + "ms_api": [ + "nn.BatchNorm2d", + { + "num_features": "REQUIRED", + "eps": 1e-05, + "momentum": 0.9, + "affine": true, + "gamma_init": "ones", + "beta_init": "zeros", + "moving_mean_init": "zeros", + "moving_var_init": "ones", + "use_batch_statistics": null, + "data_format": "NCHW", + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.BatchNorm2d", + { + "num_features": "REQUIRED", + "eps": 1e-05, + "momentum": 0.1, + "affine": true, + "track_running_stats": true, + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": { + "num_features": "num_features", + "eps": "eps", + "momentum": "momentum", + "affine": "affine", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "nn.BatchNorm3d": { + "ms_api": [ + "nn.BatchNorm3d", + { + "num_features": "REQUIRED", + "eps": 1e-05, + "momentum": 0.9, + "affine": true, + "gamma_init": "ones", + "beta_init": "zeros", + "moving_mean_init": "zeros", + "moving_var_init": "ones", + "use_batch_statistics": null, + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.BatchNorm3d", + { + "num_features": "REQUIRED", + "eps": 1e-05, + "momentum": 0.1, + "affine": true, + "track_running_stats": true, + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": { + "num_features": "num_features", + "eps": "eps", + "momentum": "momentum", + "affine": "affine", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "nn.Bilinear": { + "ms_api": [ + "nn.BiDense", + { + "in1_channels": "REQUIRED", + "in2_channels": 
"REQUIRED", + "out_channels": "REQUIRED", + "weight_init": null, + "bias_init": null, + "has_bias": true, + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.Bilinear", + { + "in1_features": "REQUIRED", + "in2_features": "REQUIRED", + "out_features": "REQUIRED", + "bias": true, + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": { + "in1_channels": "in1_features", + "in2_channels": "in2_features", + "out_channels": "out_features", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "nn.CeLU": { + "ms_api": [ + "nn.CELU", + { + "alpha": 1.0 + } + ], + "pt_api": [ + "nn.CeLU", + { + "alpha": 1.0, + "inplace": false + } + ], + "ms2pt_mapping": { + "alpha": "alpha" + }, + "gen_explicit_map": null + }, + "nn.ChannelShuffle": { + "ms_api": [ + "nn.ChannelShuffle", + { + "groups": "REQUIRED" + } + ], + "pt_api": [ + "nn.ChannelShuffle", + { + "groups": "REQUIRED" + } + ], + "ms2pt_mapping": { + "groups": "groups" + }, + "gen_explicit_map": null + }, + "nn.CTCLoss": { + "ms_api": [ + "nn.CTCLoss", + { + "blank": 0.0, + "reduction": "mean", + "zero_infinity": false + } + ], + "pt_api": [ + "nn.CTCLoss", + { + "blank": 0.0, + "reduction": "mean", + "zero_infinity": false + } + ], + "ms2pt_mapping": { + "blank": "blank", + "reduction": "reduction", + "zero_infinity": "zero_infinity" + }, + "gen_explicit_map": null + }, + "nn.ConstantPad1d": { + "ms_api": [ + "nn.ConstantPad1d", + { + "padding": "REQUIRED", + "value": "REQUIRED" + } + ], + "pt_api": [ + "nn.ConstantPad1d", + { + "padding": "REQUIRED", + "value": "REQUIRED" + } + ], + "ms2pt_mapping": { + "padding": "padding", + "value": "value" + }, + "gen_explicit_map": null + }, + "nn.ConstantPad2d": { + "ms_api": [ + "nn.ConstantPad2d", + { + "padding": "REQUIRED", + "value": "REQUIRED" + } + ], + "pt_api": [ + "nn.ConstantPad2d", + { + "padding": "REQUIRED", + "value": "REQUIRED" + } + ], + "ms2pt_mapping": { + "padding": "padding", + "value": "value" + }, + "gen_explicit_map": null + }, + 
"nn.ConstantPad3d": { + "ms_api": [ + "nn.ConstantPad3d", + { + "padding": "REQUIRED", + "value": "REQUIRED" + } + ], + "pt_api": [ + "nn.ConstantPad3d", + { + "padding": "REQUIRED", + "value": "REQUIRED" + } + ], + "ms2pt_mapping": { + "padding": "padding", + "value": "value" + }, + "gen_explicit_map": null + }, + "nn.Conv1d": { + "ms_api": [ + "nn.Conv1d", + { + "in_channels": "REQUIRED", + "out_channels": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": 1.0, + "pad_mode": "same", + "padding": 0.0, + "dilation": 1.0, + "group": 1.0, + "has_bias": false, + "weight_init": null, + "bias_init": null, + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.Conv1d", + { + "in_channels": "REQUIRED", + "out_channels": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": 1.0, + "padding": 0.0, + "dilation": 1.0, + "groups": 1.0, + "bias": true, + "padding_mode": "zeros", + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": { + "in_channels": "in_channels", + "out_channels": "out_channels", + "kernel_size": "kernel_size", + "stride": "stride", + "padding": "padding", + "dilation": "dilation" + }, + "gen_explicit_map": null + }, + "nn.Conv2d": { + "ms_api": [ + "nn.Conv2d", + { + "in_channels": "REQUIRED", + "out_channels": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": 1.0, + "pad_mode": "same", + "padding": 0.0, + "dilation": 1.0, + "group": 1.0, + "has_bias": false, + "weight_init": null, + "bias_init": null, + "data_format": "NCHW", + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.Conv2d", + { + "in_channels": "REQUIRED", + "out_channels": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": 1.0, + "padding": 0.0, + "dilation": 1.0, + "groups": 1.0, + "bias": true, + "padding_mode": "zeros", + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": { + "in_channels": "in_channels", + "out_channels": "out_channels", + "kernel_size": "kernel_size", + "stride": "stride", + "padding": "padding", + "dilation": "dilation" + }, + 
"gen_explicit_map": null + }, + "nn.Conv3d": { + "ms_api": [ + "nn.Conv3d", + { + "in_channels": "REQUIRED", + "out_channels": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": 1.0, + "pad_mode": "same", + "padding": 0.0, + "dilation": 1.0, + "group": 1.0, + "has_bias": false, + "weight_init": null, + "bias_init": null, + "data_format": "NCDHW", + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.Conv3d", + { + "in_channels": "REQUIRED", + "out_channels": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": 1.0, + "padding": 0.0, + "dilation": 1.0, + "groups": 1.0, + "bias": true, + "padding_mode": "zeros", + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": { + "in_channels": "in_channels", + "out_channels": "out_channels", + "kernel_size": "kernel_size", + "stride": "stride", + "padding": "padding", + "dilation": "dilation" + }, + "gen_explicit_map": null + }, + "nn.ConvTranspose1d": { + "ms_api": [ + "nn.Conv1dTranspose", + { + "in_channels": "REQUIRED", + "out_channels": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": 1.0, + "pad_mode": "same", + "padding": 0.0, + "dilation": 1.0, + "group": 1.0, + "has_bias": false, + "weight_init": null, + "bias_init": null, + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.ConvTranspose1d", + { + "in_channels": "REQUIRED", + "out_channels": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": 1.0, + "padding": 0.0, + "output_padding": 0.0, + "groups": 1.0, + "bias": true, + "dilation": 1.0, + "padding_mode": "zeros", + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": { + "in_channels": "in_channels", + "out_channels": "out_channels", + "kernel_size": "kernel_size", + "stride": "stride", + "padding": "padding", + "dilation": "dilation" + }, + "gen_explicit_map": null + }, + "nn.ConvTranspose2d": { + "ms_api": [ + "nn.Conv2dTranspose", + { + "in_channels": "REQUIRED", + "out_channels": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": 1.0, + "pad_mode": "same", + "padding": 0.0, + 
"output_padding": 0.0, + "dilation": 1.0, + "group": 1.0, + "has_bias": false, + "weight_init": null, + "bias_init": null, + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.ConvTranspose2d", + { + "in_channels": "REQUIRED", + "out_channels": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": 1.0, + "padding": 0.0, + "output_padding": 0.0, + "groups": 1.0, + "bias": true, + "dilation": 1.0, + "padding_mode": "zeros", + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": { + "in_channels": "in_channels", + "out_channels": "out_channels", + "kernel_size": "kernel_size", + "stride": "stride", + "padding": "padding", + "output_padding": "output_padding", + "dilation": "dilation" + }, + "gen_explicit_map": null + }, + "nn.ConvTranspose3d": { + "ms_api": [ + "nn.Conv3dTranspose", + { + "in_channels": "REQUIRED", + "out_channels": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": 1.0, + "pad_mode": "same", + "padding": 0.0, + "dilation": 1.0, + "group": 1.0, + "output_padding": 0.0, + "has_bias": false, + "weight_init": null, + "bias_init": null, + "data_format": "NCDHW", + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.ConvTranspose3d", + { + "in_channels": "REQUIRED", + "out_channels": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": 1.0, + "padding": 0.0, + "output_padding": 0.0, + "groups": 1.0, + "bias": true, + "dilation": 1.0, + "padding_mode": "zeros", + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": { + "in_channels": "in_channels", + "out_channels": "out_channels", + "kernel_size": "kernel_size", + "stride": "stride", + "padding": "padding", + "dilation": "dilation", + "output_padding": "output_padding" + }, + "gen_explicit_map": null + }, + "nn.CosineEmbeddingLoss": { + "ms_api": [ + "nn.CosineEmbeddingLoss", + { + "margin": 0.0, + "reduction": "mean" + } + ], + "pt_api": [ + "nn.CosineEmbeddingLoss", + { + "margin": 0.0, + "size_average": null, + "reduce": null, + "reduction": "mean" + } + ], + "ms2pt_mapping": { + 
"margin": "margin", + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "nn.CrossEntropyLoss": { + "ms_api": [ + "nn.CrossEntropyLoss", + { + "weight": null, + "ignore_index": "- 100", + "reduction": "mean", + "label_smoothing": 0.0 + } + ], + "pt_api": [ + "nn.CrossEntropyLoss", + { + "weight": null, + "size_average": null, + "ignore_index": -100.0, + "reduce": null, + "reduction": "mean", + "label_smoothing": 0.0 + } + ], + "ms2pt_mapping": { + "weight": "weight", + "ignore_index": "ignore_index", + "reduction": "reduction", + "label_smoothing": "label_smoothing" + }, + "gen_explicit_map": null + }, + "nn.Dropout": { + "ms_api": [ + "nn.Dropout", + { + "keep_prob": 0.5, + "p": null, + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.Dropout", + { + "p": 0.5, + "inplace": false + } + ], + "ms2pt_mapping": { + "keep_prob": "p", + "p": "p" + }, + "gen_explicit_map": null + }, + "nn.Dropout2d": { + "ms_api": [ + "nn.Dropout2d", + { + "p": 0.5 + } + ], + "pt_api": [ + "nn.Dropout2d", + { + "p": 0.5, + "inplace": false + } + ], + "ms2pt_mapping": { + "p": "p" + }, + "gen_explicit_map": null + }, + "nn.Dropout3d": { + "ms_api": [ + "nn.Dropout3d", + { + "p": 0.5 + } + ], + "pt_api": [ + "nn.Dropout3d", + { + "p": 0.5, + "inplace": false + } + ], + "ms2pt_mapping": { + "p": "p" + }, + "gen_explicit_map": null + }, + "nn.ELU": { + "ms_api": [ + "nn.ELU", + { + "alpha": 1.0 + } + ], + "pt_api": [ + "nn.ELU", + { + "alpha": 1.0, + "inplace": false + } + ], + "ms2pt_mapping": { + "alpha": "alpha" + }, + "gen_explicit_map": null + }, + "nn.Flatten": { + "ms_api": [ + "nn.Flatten", + { + "start_dim": 1.0, + "end_dim": "- 1" + } + ], + "pt_api": [ + "nn.Flatten", + { + "start_dim": 1.0, + "end_dim": -1.0 + } + ], + "ms2pt_mapping": { + "start_dim": "start_dim", + "end_dim": "end_dim" + }, + "gen_explicit_map": null + }, + "nn.GaussianNLLLoss": { + "ms_api": [ + "nn.GaussianNLLLoss", + { + "full": false, + "eps": 1e-06, + "reduction": "mean" + } + ], + 
"pt_api": [ + "nn.GaussianNLLLoss", + { + "full": false, + "eps": 1e-06, + "reduction": "mean" + } + ], + "ms2pt_mapping": { + "full": "full", + "eps": "eps", + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "nn.GELU": { + "ms_api": [ + "nn.GELU", + { + "approximate": true + } + ], + "pt_api": [ + "nn.GELU", + { + "approximate": "none" + } + ], + "ms2pt_mapping": { + "approximate": "approximate" + }, + "gen_explicit_map": null + }, + "nn.GRU": { + "ms_api": [ + "nn.GRU", + { + "*args": "REQUIRED", + "**kwargs": "REQUIRED" + } + ], + "pt_api": [ + "nn.GRU", + { + "self": "REQUIRED", + "input_size": "REQUIRED", + "hidden_size": "REQUIRED", + "num_layers": 1.0, + "bias": true, + "batch_first": false, + "dropout": 0.0, + "bidirectional": false, + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + "nn.GRUCell": { + "ms_api": [ + "nn.GRUCell", + { + "input_size": "REQUIRED", + "hidden_size": "REQUIRED", + "has_bias": true, + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.GRUCell", + { + "input_size": "REQUIRED", + "hidden_size": "REQUIRED", + "bias": true, + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": { + "input_size": "input_size", + "hidden_size": "hidden_size", + "has_bias": "bias" + }, + "gen_explicit_map": null + }, + "nn.GroupNorm": { + "ms_api": [ + "nn.GroupNorm", + { + "num_groups": "REQUIRED", + "num_channels": "REQUIRED", + "eps": 1e-05, + "affine": true, + "gamma_init": "ones", + "beta_init": "zeros", + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.GroupNorm", + { + "num_groups": "REQUIRED", + "num_channels": "REQUIRED", + "eps": 1e-05, + "affine": true, + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": { + "num_groups": "num_groups", + "num_channels": "num_channels", + "eps": "eps", + "affine": "affine" + }, + "gen_explicit_map": null + }, + "nn.Hardshrink": { + "ms_api": [ + "nn.HShrink", + { + "lambd": 0.5 + } + ], + "pt_api": [ + "nn.Hardshrink", + 
{ + "lambd": 0.5 + } + ], + "ms2pt_mapping": { + "lambd": "lambd" + }, + "gen_explicit_map": null + }, + "nn.Hardsigmoid": { + "ms_api": [ + "nn.Hsigmoid", + { + "input_x": "REQUIRED" + } + ], + "pt_api": [ + "nn.Hardsigmoid", + { + "inplace": false + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + "nn.Hardswish": { + "ms_api": [ + "nn.Hswish", + { + "x": "REQUIRED" + } + ], + "pt_api": [ + "nn.Hardswish", + { + "inplace": false + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + "nn.Hardtanh": { + "ms_api": [ + "nn.Hardtanh", + { + "min_val": "- 1.0", + "max_val": 1.0 + } + ], + "pt_api": [ + "nn.Hardtanh", + { + "min_val": -1.0, + "max_val": 1.0, + "inplace": false, + "min_value": null, + "max_value": null + } + ], + "ms2pt_mapping": { + "min_val": "min_val", + "max_val": "max_val" + }, + "gen_explicit_map": null + }, + "nn.HingeEmbeddingLoss": { + "ms_api": [ + "nn.HingeEmbeddingLoss", + { + "margin": 1.0, + "reduction": "mean" + } + ], + "pt_api": [ + "nn.HingeEmbeddingLoss", + { + "margin": 1.0, + "size_average": null, + "reduce": null, + "reduction": "mean" + } + ], + "ms2pt_mapping": { + "margin": "margin", + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "nn.HuberLoss": { + "ms_api": [ + "nn.HuberLoss", + { + "reduction": "mean", + "delta": 1.0 + } + ], + "pt_api": [ + "nn.HuberLoss", + { + "reduction": "mean", + "delta": 1.0 + } + ], + "ms2pt_mapping": { + "reduction": "reduction", + "delta": "delta" + }, + "gen_explicit_map": null + }, + "nn.Identity": { + "ms_api": [ + "nn.Identity", + { + "x": "REQUIRED" + } + ], + "pt_api": [ + "nn.Identity", + { + "*args": "REQUIRED", + "**kwargs": "REQUIRED" + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + "nn.init.uniform_": { + "ms_api": [ + "ops.uniform", + { + "shape": "REQUIRED", + "minval": "REQUIRED", + "maxval": "REQUIRED", + "seed": null, + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.init.uniform_", + { + "tensor": "REQUIRED", + "a": 
0.0, + "b": 1.0 + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + "nn.InstanceNorm1d": { + "ms_api": [ + "nn.InstanceNorm1d", + { + "num_features": "REQUIRED", + "eps": 1e-05, + "momentum": 0.1, + "affine": true, + "gamma_init": "ones", + "beta_init": "zeros", + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.InstanceNorm1d", + { + "num_features": "REQUIRED", + "eps": 1e-05, + "momentum": 0.1, + "affine": false, + "track_running_stats": false, + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": { + "num_features": "num_features", + "eps": "eps", + "momentum": "momentum", + "affine": "affine" + }, + "gen_explicit_map": null + }, + "nn.InstanceNorm2d": { + "ms_api": [ + "nn.InstanceNorm2d", + { + "num_features": "REQUIRED", + "eps": 1e-05, + "momentum": 0.1, + "affine": true, + "gamma_init": "ones", + "beta_init": "zeros", + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.InstanceNorm2d", + { + "num_features": "REQUIRED", + "eps": 1e-05, + "momentum": 0.1, + "affine": false, + "track_running_stats": false, + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": { + "num_features": "num_features", + "eps": "eps", + "momentum": "momentum", + "affine": "affine" + }, + "gen_explicit_map": null + }, + "nn.InstanceNorm3d": { + "ms_api": [ + "nn.InstanceNorm3d", + { + "num_features": "REQUIRED", + "eps": 1e-05, + "momentum": 0.1, + "affine": true, + "gamma_init": "ones", + "beta_init": "zeros", + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.InstanceNorm3d", + { + "num_features": "REQUIRED", + "eps": 1e-05, + "momentum": 0.1, + "affine": false, + "track_running_stats": false, + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": { + "num_features": "num_features", + "eps": "eps", + "momentum": "momentum", + "affine": "affine", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "nn.KLDivLoss": { + "ms_api": [ + "nn.KLDivLoss", + { + "reduction": "mean" + } + ], + "pt_api": [ + "nn.KLDivLoss", + { + 
"size_average": null, + "reduce": null, + "reduction": "mean", + "log_target": false + } + ], + "ms2pt_mapping": { + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "nn.L1Loss": { + "ms_api": [ + "nn.L1Loss", + { + "reduction": "mean" + } + ], + "pt_api": [ + "nn.L1Loss", + { + "size_average": null, + "reduce": null, + "reduction": "mean" + } + ], + "ms2pt_mapping": { + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "nn.LayerNorm": { + "ms_api": [ + "nn.LayerNorm", + { + "normalized_shape": "REQUIRED", + "begin_norm_axis": "- 1", + "begin_params_axis": "- 1", + "gamma_init": "ones", + "beta_init": "zeros", + "epsilon": 1e-07, + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.LayerNorm", + { + "normalized_shape": "REQUIRED", + "eps": 1e-05, + "elementwise_affine": true, + "bias": true, + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": { + "normalized_shape": "normalized_shape", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "nn.LeakyReLU": { + "ms_api": [ + "nn.LeakyReLU", + { + "alpha": 0.2 + } + ], + "pt_api": [ + "nn.LeakyReLU", + { + "negative_slope": 0.01, + "inplace": false + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + "nn.LPPool1d": { + "ms_api": [ + "nn.LPPool1d", + { + "norm_type": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": null, + "ceil_mode": false + } + ], + "pt_api": [ + "nn.LPPool1d", + { + "norm_type": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": null, + "ceil_mode": false + } + ], + "ms2pt_mapping": { + "norm_type": "norm_type", + "kernel_size": "kernel_size", + "stride": "stride", + "ceil_mode": "ceil_mode" + }, + "gen_explicit_map": null + }, + "nn.LPPool2d": { + "ms_api": [ + "nn.LPPool2d", + { + "norm_type": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": null, + "ceil_mode": false + } + ], + "pt_api": [ + "nn.LPPool2d", + { + "norm_type": "REQUIRED", + "kernel_size": "REQUIRED", + "stride": null, + "ceil_mode": false + } + ], + 
"ms2pt_mapping": { + "norm_type": "norm_type", + "kernel_size": "kernel_size", + "stride": "stride", + "ceil_mode": "ceil_mode" + }, + "gen_explicit_map": null + }, + "nn.LSTM": { + "ms_api": [ + "nn.LSTM", + { + "*args": "REQUIRED", + "**kwargs": "REQUIRED" + } + ], + "pt_api": [ + "nn.LSTM", + { + "self": "REQUIRED", + "input_size": "REQUIRED", + "hidden_size": "REQUIRED", + "num_layers": 1.0, + "bias": true, + "batch_first": false, + "dropout": 0.0, + "bidirectional": false, + "proj_size": 0.0, + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + "nn.LSTMCell": { + "ms_api": [ + "nn.LSTMCell", + { + "input_size": "REQUIRED", + "hidden_size": "REQUIRED", + "has_bias": true, + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.LSTMCell", + { + "input_size": "REQUIRED", + "hidden_size": "REQUIRED", + "bias": true, + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": { + "input_size": "input_size", + "hidden_size": "hidden_size", + "has_bias": "bias", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "nn.Linear": { + "ms_api": [ + "nn.Dense", + { + "in_channels": "REQUIRED", + "out_channels": "REQUIRED", + "weight_init": null, + "bias_init": null, + "has_bias": true, + "activation": null, + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.Linear", + { + "in_features": "REQUIRED", + "out_features": "REQUIRED", + "bias": true, + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": { + "in_channels": "in_features", + "out_channels": "out_features" + }, + "gen_explicit_map": null + }, + "nn.LocalResponseNorm": { + "ms_api": [ + "nn.LRN", + { + "depth_radius": 5.0, + "bias": 1.0, + "alpha": 1.0, + "beta": 0.5, + "norm_region": "ACROSS_CHANNELS" + } + ], + "pt_api": [ + "nn.LocalResponseNorm", + { + "size": "REQUIRED", + "alpha": 0.0001, + "beta": 0.75, + "k": 1.0 + } + ], + "ms2pt_mapping": { + "alpha": "alpha", + "beta": "beta" + }, + "gen_explicit_map": null + }, + "nn.LogSigmoid": { + 
"ms_api": [ + "nn.LogSigmoid", + { + "x": "REQUIRED" + } + ], + "pt_api": [ + "nn.LogSigmoid", + { + "*args": "REQUIRED", + "**kwargs": "REQUIRED" + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + "nn.LogSoftMax": { + "ms_api": [ + "nn.LogSoftMax", + { + "axis": -1 + } + ], + "pt_api": [ + "nn.LogSoftMax", + { + "dim": null + } + ], + "ms2pt_mapping": { + "axis": "dim" + }, + "gen_explicit_map": null + }, + "nn.MSELoss": { + "ms_api": [ + "nn.MSELoss", + { + "reduction": "mean" + } + ], + "pt_api": [ + "nn.MSELoss", + { + "size_average": null, + "reduce": null, + "reduction": "mean" + } + ], + "ms2pt_mapping": { + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "nn.MarginRankingLoss": { + "ms_api": [ + "nn.MarginRankingLoss", + { + "margin": 0.0, + "reduction": "mean" + } + ], + "pt_api": [ + "nn.MarginRankingLoss", + { + "margin": 0.0, + "size_average": null, + "reduce": null, + "reduction": "mean" + } + ], + "ms2pt_mapping": { + "margin": "margin", + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "nn.MaxPool1d": { + "ms_api": [ + "nn.MaxPool1d", + { + "kernel_size": 1.0, + "stride": 1.0, + "pad_mode": "valid", + "padding": 0.0, + "dilation": 1.0, + "return_indices": false, + "ceil_mode": false + } + ], + "pt_api": [ + "nn.MaxPool1d", + { + "kernel_size": "REQUIRED", + "stride": null, + "padding": 0.0, + "dilation": 1.0, + "return_indices": false, + "ceil_mode": false + } + ], + "ms2pt_mapping": { + "kernel_size": "kernel_size", + "stride": "stride", + "pad_mode": "padding", + "padding": "padding", + "dilation": "dilation", + "return_indices": "return_indices", + "ceil_mode": "ceil_mode" + }, + "gen_explicit_map": null + }, + "nn.MaxPool2d": { + "ms_api": [ + "nn.MaxPool2d", + { + "kernel_size": 1.0, + "stride": 1.0, + "pad_mode": "valid", + "padding": 0.0, + "dilation": 1.0, + "return_indices": false, + "ceil_mode": false, + "data_format": "NCHW" + } + ], + "pt_api": [ + "nn.MaxPool2d", + { + "kernel_size": 
"REQUIRED", + "stride": 1, + "padding": 0.0, + "dilation": 1.0, + "return_indices": false, + "ceil_mode": false + } + ], + "ms2pt_mapping": { + "kernel_size": "kernel_size", + "stride": "stride", + "padding": "padding", + "dilation": "dilation", + "return_indices": "return_indices", + "ceil_mode": "ceil_mode" + }, + "gen_explicit_map": null + }, + "nn.MaxPool3d": { + "ms_api": [ + "nn.MaxPool3d", + { + "kernel_size": 1.0, + "stride": 1.0, + "pad_mode": "valid", + "padding": 0.0, + "dilation": 1.0, + "return_indices": false, + "ceil_mode": false + } + ], + "pt_api": [ + "nn.MaxPool3d", + { + "kernel_size": "REQUIRED", + "stride": null, + "padding": 0.0, + "dilation": 1.0, + "return_indices": false, + "ceil_mode": false + } + ], + "ms2pt_mapping": { + "kernel_size": "kernel_size", + "stride": "stride", + "pad_mode": "padding", + "padding": "padding", + "dilation": "dilation", + "return_indices": "return_indices", + "ceil_mode": "ceil_mode" + }, + "gen_explicit_map": null + }, + "nn.MaxUnpool1d": { + "ms_api": [ + "nn.MaxUnpool1d", + { + "kernel_size": "REQUIRED", + "stride": null, + "padding": 0.0 + } + ], + "pt_api": [ + "nn.MaxUnpool1d", + { + "kernel_size": "REQUIRED", + "stride": null, + "padding": 0.0 + } + ], + "ms2pt_mapping": { + "kernel_size": "kernel_size", + "stride": "stride", + "padding": "padding" + }, + "gen_explicit_map": null + }, + "nn.MaxUnpool2d": { + "ms_api": [ + "nn.MaxUnpool2d", + { + "kernel_size": "REQUIRED", + "stride": null, + "padding": 0.0 + } + ], + "pt_api": [ + "nn.MaxUnpool2d", + { + "kernel_size": "REQUIRED", + "stride": null, + "padding": 0.0 + } + ], + "ms2pt_mapping": { + "kernel_size": "kernel_size", + "stride": "stride", + "padding": "padding" + }, + "gen_explicit_map": null + }, + "nn.MaxUnpool3d": { + "ms_api": [ + "nn.MaxUnpool3d", + { + "kernel_size": "REQUIRED", + "stride": null, + "padding": 0.0 + } + ], + "pt_api": [ + "nn.MaxUnpool3d", + { + "kernel_size": "REQUIRED", + "stride": null, + "padding": 0.0 + } + ], + 
"ms2pt_mapping": { + "kernel_size": "kernel_size", + "stride": "stride", + "padding": "padding" + }, + "gen_explicit_map": null + }, + "nn.ModuleDict": { + "ms_api": [ + "nn.CellDict", + { + "*args": "REQUIRED", + "**kwargs": "REQUIRED" + } + ], + "pt_api": [ + "nn.ModuleDict", + { + "modules": null + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + "nn.ModuleList": { + "ms_api": [ + "nn.CellList", + { + "*args": "REQUIRED", + "**kwargs": "REQUIRED" + } + ], + "pt_api": [ + "nn.ModuleList", + { + "modules": null + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + "nn.MultiheadAttention": { + "ms_api": [ + "nn.MultiheadAttention", + { + "embed_dim": "REQUIRED", + "num_heads": "REQUIRED", + "dropout": 0.0, + "has_bias": true, + "add_bias_kv": false, + "add_zero_attn": false, + "kdim": null, + "vdim": null, + "batch_first": false, + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.MultiheadAttention", + { + "embed_dim": "REQUIRED", + "num_heads": "REQUIRED", + "dropout": 0.0, + "bias": true, + "add_bias_kv": false, + "add_zero_attn": false, + "kdim": null, + "vdim": null, + "batch_first": false, + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": { + "embed_dim": "embed_dim", + "num_heads": "num_heads", + "dropout": "dropout", + "has_bias": "bias", + "add_bias_kv": "add_bias_kv", + "add_zero_attn": "add_zero_attn", + "kdim": "kdim", + "vdim": "vdim", + "batch_first": "batch_first", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "nn.MultiLabelMarginLoss": { + "ms_api": [ + "nn.MultiLabelMarginLoss", + { + "reduction": "mean" + } + ], + "pt_api": [ + "nn.MultiLabelMarginLoss", + { + "size_average": null, + "reduce": null, + "reduction": "mean" + } + ], + "ms2pt_mapping": { + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "nn.MultiLabelSoftMarginLoss": { + "ms_api": [ + "nn.MultiLabelSoftMarginLoss", + { + "weight": null, + "reduction": "mean" + } + ], + "pt_api": [ + "nn.MultiLabelSoftMarginLoss", 
+ { + "weight": null, + "size_average": null, + "reduce": null, + "reduction": "mean" + } + ], + "ms2pt_mapping": { + "weight": "weight", + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "nn.MultiMarginLoss": { + "ms_api": [ + "nn.MultiMarginLoss", + { + "p": 1.0, + "margin": 1.0, + "reduction": "mean", + "weight": null + } + ], + "pt_api": [ + "nn.MultiMarginLoss", + { + "p": 1.0, + "margin": 1.0, + "weight": null, + "size_average": null, + "reduce": null, + "reduction": "mean" + } + ], + "ms2pt_mapping": { + "p": "p", + "margin": "margin", + "reduction": "reduction", + "weight": "weight" + }, + "gen_explicit_map": null + }, + "nn.NLLLoss": { + "ms_api": [ + "nn.NLLLoss", + { + "weight": null, + "ignore_index": "- 100", + "reduction": "mean" + } + ], + "pt_api": [ + "nn.NLLLoss", + { + "weight": null, + "size_average": null, + "ignore_index": -100.0, + "reduce": null, + "reduction": "mean" + } + ], + "ms2pt_mapping": { + "weight": "weight", + "ignore_index": "ignore_index", + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "nn.PReLU": { + "ms_api": [ + "nn.PReLU", + { + "channel": 1.0, + "w": 0.25 + } + ], + "pt_api": [ + "nn.PReLU", + { + "num_parameters": 1.0, + "init": 0.25, + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + "nn.PixelShuffle": { + "ms_api": [ + "nn.PixelShuffle", + { + "upscale_factor": "REQUIRED" + } + ], + "pt_api": [ + "nn.PixelShuffle", + { + "upscale_factor": "REQUIRED" + } + ], + "ms2pt_mapping": { + "upscale_factor": "upscale_factor" + }, + "gen_explicit_map": null + }, + "nn.PixelUnshuffle": { + "ms_api": [ + "nn.PixelUnshuffle", + { + "downscale_factor": "REQUIRED" + } + ], + "pt_api": [ + "nn.PixelUnshuffle", + { + "downscale_factor": "REQUIRED" + } + ], + "ms2pt_mapping": { + "downscale_factor": "downscale_factor" + }, + "gen_explicit_map": null + }, + "nn.PoissonNLLLoss": { + "ms_api": [ + "nn.PoissonNLLLoss", + { + "log_input": true, + "full": false, 
+ "eps": 1e-08, + "reduction": "mean" + } + ], + "pt_api": [ + "nn.PoissonNLLLoss", + { + "log_input": true, + "full": false, + "size_average": null, + "eps": 1e-08, + "reduce": null, + "reduction": "mean" + } + ], + "ms2pt_mapping": { + "log_input": "log_input", + "full": "full", + "eps": "eps", + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "nn.ReflectionPad1d": { + "ms_api": [ + "nn.ReflectionPad1d", + { + "padding": "REQUIRED" + } + ], + "pt_api": [ + "nn.ReflectionPad1d", + { + "padding": "REQUIRED" + } + ], + "ms2pt_mapping": { + "padding": "padding" + }, + "gen_explicit_map": null + }, + "nn.ReflectionPad2d": { + "ms_api": [ + "nn.ReflectionPad2d", + { + "padding": "REQUIRED" + } + ], + "pt_api": [ + "nn.ReflectionPad2d", + { + "padding": "REQUIRED" + } + ], + "ms2pt_mapping": { + "padding": "padding" + }, + "gen_explicit_map": null + }, + "nn.ReLU": { + "ms_api": [ + "nn.ReLU", + { + "x": "REQUIRED" + } + ], + "pt_api": [ + "nn.ReLU", + { + "inplace": false + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + "nn.ReLU6": { + "ms_api": [ + "nn.ReLU6", + { + "x": "REQUIRED" + } + ], + "pt_api": [ + "nn.ReLU6", + { + "inplace": false + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + "nn.ReplicationPad1d": { + "ms_api": [ + "nn.ReplicationPad1d", + { + "padding": "REQUIRED" + } + ], + "pt_api": [ + "nn.ReplicationPad1d", + { + "padding": "REQUIRED" + } + ], + "ms2pt_mapping": { + "padding": "padding" + }, + "gen_explicit_map": null + }, + "nn.ReplicationPad2d": { + "ms_api": [ + "nn.ReplicationPad2d", + { + "padding": "REQUIRED" + } + ], + "pt_api": [ + "nn.ReplicationPad2d", + { + "padding": "REQUIRED" + } + ], + "ms2pt_mapping": { + "padding": "padding" + }, + "gen_explicit_map": null + }, + "nn.ReplicationPad3d": { + "ms_api": [ + "nn.ReplicationPad3d", + { + "padding": "REQUIRED" + } + ], + "pt_api": [ + "nn.ReplicationPad3d", + { + "padding": "REQUIRED" + } + ], + "ms2pt_mapping": { + "padding": "padding" + 
}, + "gen_explicit_map": null + }, + "nn.RNN": { + "ms_api": [ + "nn.RNN", + { + "*args": "REQUIRED", + "**kwargs": "REQUIRED" + } + ], + "pt_api": [ + "nn.RNN", + { + "self": "REQUIRED", + "input_size": "REQUIRED", + "hidden_size": "REQUIRED", + "num_layers": 1.0, + "nonlinearity": "tanh", + "bias": true, + "batch_first": false, + "dropout": 0.0, + "bidirectional": false, + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + "nn.RNNCell": { + "ms_api": [ + "nn.RNNCell", + { + "input_size": "REQUIRED", + "hidden_size": "REQUIRED", + "has_bias": true, + "nonlinearity": "tanh", + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.RNNCell", + { + "input_size": "REQUIRED", + "hidden_size": "REQUIRED", + "bias": true, + "nonlinearity": "tanh", + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": { + "input_size": "input_size", + "hidden_size": "hidden_size", + "has_bias": "bias", + "nonlinearity: str ": "nonlinearity", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "nn.RReLU": { + "ms_api": [ + "nn.RReLU", + { + "lower": "1 / 8", + "upper": "1 / 3" + } + ], + "pt_api": [ + "nn.RReLU", + { + "lower": 0.125, + "upper": 0.3333333333333333, + "inplace": false + } + ], + "ms2pt_mapping": { + "lower": "lower", + "upper": "upper" + }, + "gen_explicit_map": null + }, + "nn.SeLU": { + "ms_api": [ + "nn.SeLU", + {} + ], + "pt_api": [ + "nn.SeLU", + { + "inplace": false + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + "nn.Sequential": { + "ms_api": [ + "nn.SequentialCell", + { + "*args": "REQUIRED" + } + ], + "pt_api": [ + "nn.Sequential", + { + "*args": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + "nn.Sigmoid": { + "ms_api": [ + "nn.Sigmoid", + { + "input_x": "REQUIRED" + } + ], + "pt_api": [ + "nn.Sigmoid", + { + "*args": "REQUIRED", + "**kwargs": "REQUIRED" + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + "nn.SiLU": { + "ms_api": [ + 
"nn.SiLU", + { + "x": "REQUIRED" + } + ], + "pt_api": [ + "nn.SiLU", + { + "inplace": false + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + "nn.SmoothL1Loss": { + "ms_api": [ + "nn.SmoothL1Loss", + { + "beta": 1.0, + "reduction": "none" + } + ], + "pt_api": [ + "nn.SmoothL1Loss", + { + "size_average": null, + "reduce": null, + "reduction": "mean", + "beta": 1.0 + } + ], + "ms2pt_mapping": { + "beta": "beta", + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "nn.SoftMarginLoss": { + "ms_api": [ + "nn.SoftMarginLoss", + { + "reduction": "mean" + } + ], + "pt_api": [ + "nn.SoftMarginLoss", + { + "size_average": null, + "reduce": null, + "reduction": "mean" + } + ], + "ms2pt_mapping": { + "reduction": "reduction" + }, + "gen_explicit_map": null + }, + "nn.Softmax": { + "ms_api": [ + "nn.Softmax", + { + "axis": "- 1" + } + ], + "pt_api": [ + "nn.Softmax", + { + "dim": null + } + ], + "ms2pt_mapping": { + "axis": "dim" + }, + "gen_explicit_map": null + }, + "nn.Softmin": { + "ms_api": [ + "nn.Softmin", + { + "axis": "- 1" + } + ], + "pt_api": [ + "nn.Softmin", + { + "dim": null + } + ], + "ms2pt_mapping": { + "axis": "dim" + }, + "gen_explicit_map": null + }, + "nn.Softmax2d": { + "ms_api": [ + "nn.Softmax2d", + { + "x": "REQUIRED" + } + ], + "pt_api": [ + "nn.Softmax2d", + { + "*args": "REQUIRED", + "**kwargs": "REQUIRED" + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + "nn.Softshrink": { + "ms_api": [ + "nn.SoftShrink", + { + "lambd": 0.5 + } + ], + "pt_api": [ + "nn.Softshrink", + { + "lambd": 0.5 + } + ], + "ms2pt_mapping": { + "lambd": "lambd" + }, + "gen_explicit_map": null + }, + "nn.Softsign": { + "ms_api": [ + "nn.Softsign", + { + "x": "REQUIRED" + } + ], + "pt_api": [ + "nn.Softsign", + { + "*args": "REQUIRED", + "**kwargs": "REQUIRED" + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + "nn.SyncBatchNorm": { + "ms_api": [ + "nn.SyncBatchNorm", + { + "num_features": "REQUIRED", + "eps": 1e-05, + 
"momentum": 0.9, + "affine": true, + "gamma_init": "ones", + "beta_init": "zeros", + "moving_mean_init": "zeros", + "moving_var_init": "ones", + "use_batch_statistics": null, + "process_groups": null, + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.SyncBatchNorm", + { + "num_features": "REQUIRED", + "eps": 1e-05, + "momentum": 0.1, + "affine": true, + "track_running_stats": true, + "process_group": null, + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": { + "num_features": "num_features", + "eps": "eps", + "momentum": "momentum", + "affine": "affine", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "nn.Tanh": { + "ms_api": [ + "nn.Tanh", + { + "x": "REQUIRED" + } + ], + "pt_api": [ + "nn.Tanh", + { + "*args": "REQUIRED", + "**kwargs": "REQUIRED" + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + "nn.Tanhshrink": { + "ms_api": [ + "nn.Tanhshrink", + { + "x": "REQUIRED" + } + ], + "pt_api": [ + "nn.Tanhshrink", + { + "*args": "REQUIRED", + "**kwargs": "REQUIRED" + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + "nn.Threshold": { + "ms_api": [ + "nn.Threshold", + { + "threshold": "REQUIRED", + "value": "REQUIRED" + } + ], + "pt_api": [ + "nn.Threshold", + { + "threshold": "REQUIRED", + "value": "REQUIRED", + "inplace": false + } + ], + "ms2pt_mapping": { + "threshold": "threshold", + "value": "value" + }, + "gen_explicit_map": null + }, + "nn.Transformer": { + "ms_api": [ + "nn.Transformer", + { + "d_model": 512.0, + "nhead": 8.0, + "num_encoder_layers": 6.0, + "num_decoder_layers": 6.0, + "dim_feedforward": 2048.0, + "dropout": 0.1, + "activation": "relu", + "custom_encoder": null, + "custom_decoder": null, + "layer_norm_eps": 1e-05, + "batch_first": false, + "norm_first": false, + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.Transformer", + { + "d_model": 512.0, + "nhead": 8.0, + "num_encoder_layers": 6.0, + "num_decoder_layers": 6.0, + "dim_feedforward": 2048.0, + "dropout": 0.1, + 
"activation": "relu", + "custom_encoder": null, + "custom_decoder": null, + "layer_norm_eps": 1e-05, + "batch_first": false, + "norm_first": false, + "bias": true, + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": { + "d_model": "d_model", + "nhead": "nhead", + "num_encoder_layers": "num_encoder_layers", + "num_decoder_layers": "num_decoder_layers", + "dim_feedforward": "dim_feedforward", + "dropout": "dropout", + "activation": "activation", + "layer_norm_eps": "norm_first", + "batch_first": "batch_first", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "nn.TransformerDecoder": { + "ms_api": [ + "nn.TransformerDecoder", + { + "decoder_layer": "REQUIRED", + "num_layers": "REQUIRED", + "norm": null + } + ], + "pt_api": [ + "nn.TransformerDecoder", + { + "decoder_layer": "REQUIRED", + "num_layers": "REQUIRED", + "norm": null + } + ], + "ms2pt_mapping": { + "decoder_layer": "decoder_layer", + "num_layers": "num_layers", + "norm": "norm" + }, + "gen_explicit_map": null + }, + "nn.TransformerEncoder": { + "ms_api": [ + "nn.TransformerEncoder", + { + "encoder_layer": "REQUIRED", + "num_layers": "REQUIRED", + "norm": null + } + ], + "pt_api": [ + "nn.TransformerEncoder", + { + "encoder_layer": "REQUIRED", + "num_layers": "REQUIRED", + "norm": null, + "enable_nested_tensor": true, + "mask_check": true + } + ], + "ms2pt_mapping": { + "encoder_layer": "encoder_layer", + "num_layers": "num_layers", + "norm": "norm" + }, + "gen_explicit_map": null + }, + "nn.TransformerDecoderLayer": { + "ms_api": [ + "nn.TransformerDecoderLayer", + { + "d_model": "REQUIRED", + "nhead": "REQUIRED", + "dim_feedforward": 2048.0, + "dropout": 0.1, + "activation": "relu", + "layer_norm_eps": 1e-05, + "batch_first": false, + "norm_first": false, + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.TransformerDecoderLayer", + { + "d_model": "REQUIRED", + "nhead": "REQUIRED", + "dim_feedforward": 2048.0, + "dropout": 0.1, + "activation": "relu", + "layer_norm_eps": 1e-05, + 
"batch_first": false, + "norm_first": false, + "bias": true, + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": { + "d_model": "d_model", + "nhead": "nhead", + "dim_feedforward": "dim_feedforward", + "dropout": "dropout", + "activation": "activation", + "layer_norm_eps": "norm_first", + "batch_first": "batch_first", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "nn.TransformerEncoderLayer": { + "ms_api": [ + "nn.TransformerEncoderLayer", + { + "d_model": "REQUIRED", + "nhead": "REQUIRED", + "dim_feedforward": 2048.0, + "dropout": 0.1, + "activation": "relu", + "layer_norm_eps": 1e-05, + "batch_first": false, + "norm_first": false, + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "nn.TransformerEncoderLayer", + { + "d_model": "REQUIRED", + "nhead": "REQUIRED", + "dim_feedforward": 2048.0, + "dropout": 0.1, + "activation": "relu", + "layer_norm_eps": 1e-05, + "batch_first": false, + "norm_first": false, + "bias": true, + "device": null, + "dtype": null + } + ], + "ms2pt_mapping": { + "d_model": "d_model", + "nhead": "nhead", + "dim_feedforward": "dim_feedforward", + "dropout": "dropout", + "activation": "activation", + "layer_norm_eps": "layer_norm_eps", + "batch_first": "batch_first", + "norm_first": "norm_first" + }, + "gen_explicit_map": null + }, + "nn.TripletMarginLoss": { + "ms_api": [ + "nn.TripletMarginLoss", + { + "p": 2.0, + "swap": false, + "eps": 1e-06, + "reduction": "mean", + "margin": 1.0 + } + ], + "pt_api": [ + "nn.TripletMarginLoss", + { + "margin": 1.0, + "p": 2.0, + "eps": 1e-06, + "swap": false, + "size_average": null, + "reduce": null, + "reduction": "mean" + } + ], + "ms2pt_mapping": { + "p": "p", + "swap": "swap", + "eps": "eps", + "reduction": "reduction", + "margin": "margin" + }, + "gen_explicit_map": null + }, + "nn.Unflatten": { + "ms_api": [ + "nn.Unflatten", + { + "axis": "REQUIRED", + "unflattened_size": "REQUIRED" + } + ], + "pt_api": [ + "nn.Unflatten", + { + "dim": "REQUIRED", + "unflattened_size": 
"REQUIRED" + } + ], + "ms2pt_mapping": { + "axis": "dim", + "unflattened_size": "unflattened_size" + }, + "gen_explicit_map": null + }, + "nn.Unfold": { + "ms_api": [ + "nn.Unfold", + { + "ksizes": "REQUIRED", + "strides": "REQUIRED", + "rates": "REQUIRED", + "padding": "valid" + } + ], + "pt_api": [ + "nn.Unfold", + { + "kernel_size": "REQUIRED", + "dilation": 1.0, + "padding": 0.0, + "stride": 1.0 + } + ], + "ms2pt_mapping": { + "ksizes": "kernel_size", + "padding": "padding" + }, + "gen_explicit_map": null + }, + "nn.Upsample": { + "ms_api": [ + "nn.Upsample", + { + "size": null, + "scale_factor": null, + "mode": "nearest", + "align_corners": null, + "recompute_scale_factor": null + } + ], + "pt_api": [ + "nn.Upsample", + { + "size": null, + "scale_factor": null, + "mode": "nearest", + "align_corners": null, + "recompute_scale_factor": null + } + ], + "ms2pt_mapping": { + "size": "size", + "scale_factor": "scale_factor", + "mode": "mode", + "align_corners": "align_corners", + "recompute_scale_factor": "recompute_scale_factor" + }, + "gen_explicit_map": null + }, + "nn.ZeroPad2d": { + "ms_api": [ + "nn.ZeroPad2d", + { + "padding": "REQUIRED" + } + ], + "pt_api": [ + "nn.ZeroPad2d", + { + "padding": "REQUIRED" + } + ], + "ms2pt_mapping": { + "padding": "padding" + }, + "gen_explicit_map": null + }, + "nn.Module.apply": { + "ms_api": [ + "nn.Cell.apply", + {} + ], + "pt_api": [ + "nn.Module.apply", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + "nn.utils.clip_grad_value_": { + "ms_api": [ + "ops.clip_by_value", + { + "x": "REQUIRED", + "clip_value_min": null, + "clip_value_max": null + } + ], + "pt_api": [ + "nn.utils.clip_grad_value_", + { + "parameters": "REQUIRED", + "clip_value": "REQUIRED", + "foreach": null + } + ], + "ms2pt_mapping": { + "x": "parameters" + }, + "gen_explicit_map": null + }, + "nn.utils.clip_grad_norm_": { + "ms_api": [ + "ops.clip_by_norm", + { + "x": "REQUIRED", + "max_norm": "REQUIRED", + "norm_type": 2.0, + 
"error_if_nonfinite": false + } + ], + "pt_api": [ + "nn.utils.clip_grad_norm_", + { + "parameters": "REQUIRED", + "max_norm": "REQUIRED", + "error_if_nonfinite": false, + "foreach": null + } + ], + "ms2pt_mapping": { + "x": "parameters", + "max_norm": "max_norm", + "error_if_nonfinite": "error_if_nonfinite" + }, + "gen_explicit_map": null + } +} \ No newline at end of file diff --git a/mindconverter/mindconverter/mappings/tensor_dot_mappings.json b/mindconverter/mindconverter/mappings/tensor_dot_mappings.json new file mode 100644 index 0000000000000000000000000000000000000000..ab583284d18183dc0a7e204728291fe40dfe0a09 --- /dev/null +++ b/mindconverter/mindconverter/mappings/tensor_dot_mappings.json @@ -0,0 +1,3601 @@ +{ + ".abs": { + "ms_api": [ + ".abs", + {} + ], + "pt_api": [ + ".abs", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".absolute": { + "ms_api": [ + ".absolute", + {} + ], + "pt_api": [ + ".absolute", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".acos": { + "ms_api": [ + ".acos", + {} + ], + "pt_api": [ + ".acos", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".acosh": { + "ms_api": [ + ".acosh", + {} + ], + "pt_api": [ + ".acosh", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".add": { + "ms_api": [ + ".add", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".add", + { + "other": "REQUIRED", + "alpha": 1.0 + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".addbmm": { + "ms_api": [ + ".addbmm", + { + "batch1": "REQUIRED", + "batch2": "REQUIRED", + "beta": 1.0, + "alpha": 1.0 + } + ], + "pt_api": [ + ".addbmm", + { + "batch1": "REQUIRED", + "batch2": "REQUIRED", + "beta": 1.0, + "alpha": 1.0 + } + ], + "ms2pt_mapping": { + "batch1": "batch1", + "batch2": "batch2", + "beta": "beta", + "alpha": "alpha" + }, + "gen_explicit_map": null + }, + ".addcdiv": { + "ms_api": [ + ".addcdiv", + { + "tensor1": "REQUIRED", + "tensor2": "REQUIRED", + "value": 
1.0 + } + ], + "pt_api": [ + ".addcdiv", + { + "tensor1": "REQUIRED", + "tensor2": "REQUIRED", + "value": 1.0 + } + ], + "ms2pt_mapping": { + "tensor1": "tensor1", + "tensor2": "tensor2", + "value": "value" + }, + "gen_explicit_map": null + }, + ".addcmul": { + "ms_api": [ + ".addcmul", + { + "tensor1": "REQUIRED", + "tensor2": "REQUIRED", + "value": 1.0 + } + ], + "pt_api": [ + ".addcmul", + { + "tensor1": "REQUIRED", + "tensor2": "REQUIRED", + "value": 1.0 + } + ], + "ms2pt_mapping": { + "tensor1": "tensor1", + "tensor2": "tensor2", + "value": "value" + }, + "gen_explicit_map": null + }, + ".addmm": { + "ms_api": [ + ".addmm", + { + "mat1": "REQUIRED", + "mat2": "REQUIRED", + "beta": 1.0, + "alpha": 1.0 + } + ], + "pt_api": [ + ".addmm", + { + "mat1": "REQUIRED", + "mat2": "REQUIRED", + "beta": 1.0, + "alpha": 1.0 + } + ], + "ms2pt_mapping": { + "mat1": "mat1", + "mat2": "mat2", + "beta": "beta", + "alpha": "alpha" + }, + "gen_explicit_map": null + }, + ".addmv": { + "ms_api": [ + ".addmv", + { + "mat": "REQUIRED", + "vec": "REQUIRED", + "beta": 1.0, + "alpha": 1.0 + } + ], + "pt_api": [ + ".addmv", + { + "mat": "REQUIRED", + "vec": "REQUIRED", + "beta": 1.0, + "alpha": 1.0 + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".addr": { + "ms_api": [ + ".addr", + { + "vec1": "REQUIRED", + "vec2": "REQUIRED", + "beta": 1.0, + "alpha": 1.0 + } + ], + "pt_api": [ + ".addr", + { + "vec1": "REQUIRED", + "vec2": "REQUIRED", + "beta": 1.0, + "alpha": 1.0 + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".all": { + "ms_api": [ + ".all", + { + "axis": null, + "keep_dims": false + } + ], + "pt_api": [ + ".all", + { + "dim": null, + "keepdim": false + } + ], + "ms2pt_mapping": { + "axis": "dim", + "keep_dims": "keepdim" + }, + "gen_explicit_map": null + }, + ".amax": { + "ms_api": [ + ".amax", + { + "axis": null, + "keepdims": false, + "initial": null, + "where": null + } + ], + "pt_api": [ + ".amax", + { + "dim": null, + "keepdim": false 
+ } + ], + "ms2pt_mapping": { + "axis": "dim", + "keepdims": "keepdim" + }, + "gen_explicit_map": null + }, + ".amin": { + "ms_api": [ + ".amin", + { + "axis": null, + "keepdims": false, + "initial": null, + "where": null + } + ], + "pt_api": [ + ".amin", + { + "dim": null, + "keepdim": false + } + ], + "ms2pt_mapping": { + "axis": "dim", + "keepdims": "keepdim" + }, + "gen_explicit_map": null + }, + ".any": { + "ms_api": [ + ".any", + { + "axis": null, + "keep_dims": false + } + ], + "pt_api": [ + ".any", + { + "dim": null, + "keepdim": false + } + ], + "ms2pt_mapping": { + "axis": "dim", + "keep_dims": "keepdim" + }, + "gen_explicit_map": null + }, + ".arccos": { + "ms_api": [ + ".arccos", + {} + ], + "pt_api": [ + ".arccos", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".arccosh": { + "ms_api": [ + ".arccosh", + {} + ], + "pt_api": [ + ".arccosh", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".argmax": { + "ms_api": [ + ".argmax", + { + "axis": null, + "keepdims": false + } + ], + "pt_api": [ + ".argmax", + { + "dim": null, + "keepdim": false + } + ], + "ms2pt_mapping": { + "axis": "dim", + "keepdims": "keepdim" + }, + "gen_explicit_map": null + }, + ".angle": { + "ms_api": [ + ".angle", + {} + ], + "pt_api": [ + ".angle", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".arcsin": { + "ms_api": [ + ".arcsin", + {} + ], + "pt_api": [ + ".arcsin", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".arcsinh": { + "ms_api": [ + ".arcsinh", + {} + ], + "pt_api": [ + ".arcsinh", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".arctan": { + "ms_api": [ + ".arctan", + {} + ], + "pt_api": [ + ".arctan", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".arctanh": { + "ms_api": [ + ".arctanh", + {} + ], + "pt_api": [ + ".arctanh", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".argmin": { + "ms_api": [ + ".argmin", + { + "axis": null, 
+ "keepdims": false + } + ], + "pt_api": [ + ".argmin", + { + "dim": null, + "keepdim": false + } + ], + "ms2pt_mapping": { + "axis": "dim", + "keepdims": "keepdim" + }, + "gen_explicit_map": null + }, + ".argsort": { + "ms_api": [ + ".argsort", + { + "axis": -1, + "descending": false + } + ], + "pt_api": [ + ".argsort", + { + "dim": -1.0, + "descending": false + } + ], + "ms2pt_mapping": { + "axis": "dim", + "descending": "descending" + }, + "gen_explicit_map": null + }, + ".asin": { + "ms_api": [ + ".asin", + {} + ], + "pt_api": [ + ".asin", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".asinh": { + "ms_api": [ + ".asinh", + {} + ], + "pt_api": [ + ".asinh", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".atan": { + "ms_api": [ + ".atan", + {} + ], + "pt_api": [ + ".atan", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".atan2": { + "ms_api": [ + ".atan2", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".atan2", + { + "other": "REQUIRED" + } + ], + "ms2pt_mapping": { + "other": "other" + }, + "gen_explicit_map": null + }, + ".atanh": { + "ms_api": [ + ".atanh", + {} + ], + "pt_api": [ + ".atanh", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".baddbmm": { + "ms_api": [ + ".baddbmm", + { + "batch1": "REQUIRED", + "batch2": "REQUIRED", + "beta": 1.0, + "alpha": 1.0 + } + ], + "pt_api": [ + ".baddbmm", + { + "batch1": "REQUIRED", + "batch2": "REQUIRED", + "beta": 1.0, + "alpha": 1.0 + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".bernoulli": { + "ms_api": [ + ".bernoulli", + { + "p": 0.5, + "seed": null + } + ], + "pt_api": [ + ".bernoulli", + { + "generator": null + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".bincount": { + "ms_api": [ + ".bincount", + { + "weights": null, + "minlength": 0.0 + } + ], + "pt_api": [ + ".bincount", + { + "weights": null, + "minlength": 0.0 + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + 
}, + ".bitwise_and": { + "ms_api": [ + ".bitwise_and", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".bitwise_and", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".bitwise_or": { + "ms_api": [ + ".bitwise_or", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".bitwise_or", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".bitwise_xor": { + "ms_api": [ + ".bitwise_xor", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".bitwise_xor", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".bmm": { + "ms_api": [ + ".bmm", + { + "mat2": "REQUIRED" + } + ], + "pt_api": [ + ".bmm", + { + "batch2": "REQUIRED" + } + ], + "ms2pt_mapping": { + "mat2": "batch2" + }, + "gen_explicit_map": null + }, + ".bool": { + "ms_api": [ + ".bool", + {} + ], + "pt_api": [ + ".bool", + { + "memory_format": "torch.preserve_format" + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".broadcast_to": { + "ms_api": [ + ".broadcast_to", + { + "shape": "REQUIRED" + } + ], + "pt_api": [ + ".broadcast_to", + { + "shape": "REQUIRED" + } + ], + "ms2pt_mapping": { + "shape": "shape" + }, + "gen_explicit_map": null + }, + ".cauchy_": { + "ms_api": [ + ".cauchy", + { + "median": 0.0, + "sigma": 1.0 + } + ], + "pt_api": [ + ".cauchy_", + { + "median": 0.0, + "sigma": 1.0, + "generator": null + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".ceil": { + "ms_api": [ + ".ceil", + {} + ], + "pt_api": [ + ".ceil", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".chunk": { + "ms_api": [ + ".chunk", + { + "chunks": "REQUIRED", + "axis": 0.0 + } + ], + "pt_api": [ + ".chunk", + { + "chunks": "REQUIRED", + "dim": 0.0 + } + ], + "ms2pt_mapping": { + "chunks": "chunks", + "axis": "dim" + }, + "gen_explicit_map": null + }, + ".cholesky": { + "ms_api": [ + ".cholesky", + { + "upper": false + } + ], + "pt_api": [ + ".cholesky", + { + "upper": false + } + ], + "ms2pt_mapping": { + }, + 
"gen_explicit_map": null + }, + ".cholesky_solve": { + "ms_api": [ + ".cholesky_solve", + { + "input2": "REQUIRED", + "upper": false + } + ], + "pt_api": [ + ".cholesky_solve", + { + "input2": "REQUIRED", + "upper": false + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".clamp": { + "ms_api": [ + ".clamp", + { + "min": null, + "max": null + } + ], + "pt_api": [ + ".clamp", + { + "min": null, + "max": null + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".clip": { + "ms_api": [ + ".clip", + { + "min": null, + "max": null + } + ], + "pt_api": [ + ".clip", + { + "min": null, + "max": null + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".conj": { + "ms_api": [ + ".conj", + {} + ], + "pt_api": [ + ".conj", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".copysign": { + "ms_api": [ + ".copysign", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".copysign", + { + "other": "REQUIRED" + } + ], + "ms2pt_mapping": { + "other": "other" + }, + "gen_explicit_map": null + }, + ".cos": { + "ms_api": [ + ".cos", + {} + ], + "pt_api": [ + ".cos", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".cosh": { + "ms_api": [ + ".cosh", + {} + ], + "pt_api": [ + ".cosh", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".cross": { + "ms_api": [ + ".cross", + { + "other": "REQUIRED", + "dim": null + } + ], + "pt_api": [ + ".cross", + { + "other": "REQUIRED", + "dim": null + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".cummax": { + "ms_api": [ + ".cummax", + { + "axis": "REQUIRED" + } + ], + "pt_api": [ + ".cummax", + { + "dim": "REQUIRED" + } + ], + "ms2pt_mapping": { + "axis": "dim" + }, + "gen_explicit_map": null + }, + ".cummin": { + "ms_api": [ + ".cummin", + { + "axis": "REQUIRED" + } + ], + "pt_api": [ + ".cummin", + { + "dim": "REQUIRED" + } + ], + "ms2pt_mapping": { + "axis": "dim" + }, + "gen_explicit_map": null + }, + ".cumprod": { + 
"ms_api": [ + ".cumprod", + { + "dim": "REQUIRED", + "dtype": null + } + ], + "pt_api": [ + ".cumprod", + { + "dim": "REQUIRED", + "dtype": null + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".cumsum": { + "ms_api": [ + ".cumsum", + { + "axis": null, + "dtype": null + } + ], + "pt_api": [ + ".cumsum", + { + "dim": "REQUIRED", + "dtype": null + } + ], + "ms2pt_mapping": { + "axis": "dim", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + ".deg2rad": { + "ms_api": [ + ".deg2rad", + {} + ], + "pt_api": [ + ".deg2rad", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".diag": { + "ms_api": [ + ".diag", + {} + ], + "pt_api": [ + ".diag", + { + "diagonal": 0.0 + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".diagflat": { + "ms_api": [ + ".diagflat", + { + "offset": 0.0 + } + ], + "pt_api": [ + ".diagflat", + { + "offset": 0.0 + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".diff": { + "ms_api": [ + ".diff", + { + "n": 1.0, + "axis": "- 1", + "prepend": null, + "append": null + } + ], + "pt_api": [ + ".diff", + { + "n": 1.0, + "dim": -1.0, + "prepend": null, + "append": null + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".digamma": { + "ms_api": [ + ".digamma", + {} + ], + "pt_api": [ + ".digamma", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".dim": { + "ms_api": [ + ".ndimension", + {} + ], + "pt_api": [ + ".dim", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".div": { + "ms_api": [ + ".div", + { + "value": "REQUIRED", + "rounding_mode": null + } + ], + "pt_api": [ + ".div", + { + "value": "REQUIRED", + "rounding_mode": null + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".divide": { + "ms_api": [ + ".divide", + { + "value": "REQUIRED", + "rounding_mode": null + } + ], + "pt_api": [ + ".divide", + { + "value": "REQUIRED", + "rounding_mode": null + } + ], + "ms2pt_mapping": { + }, + 
"gen_explicit_map": null + }, + ".eq": { + "ms_api": [ + ".equal", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".eq", + { + "other": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".erf": { + "ms_api": [ + ".erf", + {} + ], + "pt_api": [ + ".erf", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".erfc": { + "ms_api": [ + ".erfc", + {} + ], + "pt_api": [ + ".erfc", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".erfinv": { + "ms_api": [ + ".erfinv", + {} + ], + "pt_api": [ + ".erfinv", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".exp": { + "ms_api": [ + ".exp", + {} + ], + "pt_api": [ + ".exp", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".expand": { + "ms_api": [ + ".broadcast_to", + { + "shape": "REQUIRED" + } + ], + "pt_api": [ + ".expand", + { + "*sizes": "REQUIRED" + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".expand_as": { + "ms_api": [ + ".expand_as", + { + "x": "REQUIRED" + } + ], + "pt_api": [ + ".expand_as", + { + "other": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".expm1": { + "ms_api": [ + ".expm1", + {} + ], + "pt_api": [ + ".expm1", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".fill_diagonal_": { + "ms_api": [ + ".fill_diagonal", + { + "fill_value": "REQUIRED", + "wrap": false + } + ], + "pt_api": [ + ".fill_diagonal_", + { + "fill_value": "REQUIRED", + "wrap": false + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".flip": { + "ms_api": [ + ".flip", + { + "dims": "REQUIRED" + } + ], + "pt_api": [ + ".flip", + { + "dims": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".fliplr": { + "ms_api": [ + ".fliplr", + {} + ], + "pt_api": [ + ".fliplr", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".flipud": { + "ms_api": [ + ".flipud", + {} + ], + "pt_api": [ + ".flipud", + {} + ], 
+ "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".float": { + "ms_api": [ + ".float", + {} + ], + "pt_api": [ + ".float", + { + "memory_format": "torch.preserve_format" + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".float_power": { + "ms_api": [ + ".float_power", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".float_power", + { + "exponent": "REQUIRED" + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".floor": { + "ms_api": [ + ".floor", + {} + ], + "pt_api": [ + ".floor", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".floor_divide": { + "ms_api": [ + ".floor_divide", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".floor_divide", + { + "value": "REQUIRED" + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".fmax": { + "ms_api": [ + ".fmax", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".fmax", + { + "other": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".fmod": { + "ms_api": [ + ".fmod", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".fmod", + { + "divisor": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".frac": { + "ms_api": [ + ".frac", + {} + ], + "pt_api": [ + ".frac", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".gather": { + "ms_api": [ + ".gather_elements", + { + "dim": "REQUIRED", + "index": "REQUIRED" + } + ], + "pt_api": [ + ".gather", + { + "dim": "REQUIRED", + "index": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".ge": { + "ms_api": [ + ".ge", + { + "x": "REQUIRED" + } + ], + "pt_api": [ + ".ge", + { + "other": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".geqrf": { + "ms_api": [ + ".geqrf", + {} + ], + "pt_api": [ + ".geqrf", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".ger": { + "ms_api": [ + ".ger", + { + "vec2": "REQUIRED" + } + ], + "pt_api": [ + ".ger", + { + 
"vec2": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".greater": { + "ms_api": [ + ".greater", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".greater", + { + "other": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".greater_equal": { + "ms_api": [ + ".greater_equal", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".greater_equal", + { + "other": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".gt": { + "ms_api": [ + ".gt", + { + "x": "REQUIRED" + } + ], + "pt_api": [ + ".gt", + { + "other": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".H": { + "ms_api": [ + ".H", + {} + ], + "pt_api": [ + ".H", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".half": { + "ms_api": [ + ".half", + {} + ], + "pt_api": [ + ".half", + { + "memory_format": "torch.preserve_format" + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".hardshrink": { + "ms_api": [ + ".hardshrink", + { + "lambd": 0.5 + } + ], + "pt_api": [ + ".hardshrink", + { + "lambd": 0.5 + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".heaviside": { + "ms_api": [ + ".heaviside", + { + "values": "REQUIRED" + } + ], + "pt_api": [ + ".heaviside", + { + "values": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".histc": { + "ms_api": [ + ".histc", + { + "bins": 100.0, + "min": 0.0, + "max": 0.0 + } + ], + "pt_api": [ + ".histc", + { + "bins": 100.0, + "min": 0.0, + "max": 0.0 + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".hypot": { + "ms_api": [ + ".hypot", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".hypot", + { + "other": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".i0": { + "ms_api": [ + ".i0", + {} + ], + "pt_api": [ + ".i0", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".igamma": { + "ms_api": [ + 
".igamma", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".igamma", + { + "other": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".igammac": { + "ms_api": [ + ".igammac", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".igammac", + { + "other": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".imag": { + "ms_api": [ + ".imag", + {} + ], + "pt_api": [ + ".imag", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".index_add": { + "ms_api": [ + ".index_add", + { + "dim": "REQUIRED", + "index": "REQUIRED", + "source": "REQUIRED", + "alpha": 1.0 + } + ], + "pt_api": [ + ".index_add", + { + "dim": "REQUIRED", + "index": "REQUIRED", + "source": "REQUIRED", + "alpha": 1.0 + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".index_fill": { + "ms_api": [ + ".index_fill", + { + "axis": "REQUIRED", + "index": "REQUIRED", + "value": "REQUIRED" + } + ], + "pt_api": [ + ".index_fill", + { + "dim": "REQUIRED", + "index": "REQUIRED", + "value": "REQUIRED" + } + ], + "ms2pt_mapping": { + "axis": "dim", + "index": "index", + "value": "value" + }, + "gen_explicit_map": null + }, + ".index_put": { + "ms_api": [ + ".index_put", + { + "indices": "REQUIRED", + "values": "REQUIRED", + "accumulate": false + } + ], + "pt_api": [ + ".index_put", + { + "indices": "REQUIRED", + "values": "REQUIRED", + "accumulate": false + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".index_select": { + "ms_api": [ + ".index_select", + { + "axis": "REQUIRED", + "index": "REQUIRED" + } + ], + "pt_api": [ + ".index_select", + { + "dim": "REQUIRED", + "index": "REQUIRED" + } + ], + "ms2pt_mapping": { + "axis": "dim", + "index": "index" + }, + "gen_explicit_map": null + }, + ".inner": { + "ms_api": [ + ".inner", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".inner", + { + "other": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".int": { + 
"ms_api": [ + ".int", + {} + ], + "pt_api": [ + ".int", + { + "memory_format": "torch.preserve_format" + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".inverse": { + "ms_api": [ + ".inverse", + {} + ], + "pt_api": [ + ".inverse", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".isclose": { + "ms_api": [ + ".isclose", + { + "x2": "REQUIRED", + "rtol": 1e-05, + "atol": 1e-08, + "equal_nan": false + } + ], + "pt_api": [ + ".isclose", + { + "other": "REQUIRED", + "rtol": 1e-05, + "atol": 1e-08, + "equal_nan": false + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".isfinite": { + "ms_api": [ + ".isfinite", + {} + ], + "pt_api": [ + ".isfinite", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".isinf": { + "ms_api": [ + ".isinf", + {} + ], + "pt_api": [ + ".isinf", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".isnan": { + "ms_api": [ + ".isnan", + {} + ], + "pt_api": [ + ".isnan", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".is_complex": { + "ms_api": [ + ".is_complex", + {} + ], + "pt_api": [ + ".is_complex", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".is_floating_point": { + "ms_api": [ + ".is_floating_point", + {} + ], + "pt_api": [ + ".is_floating_point", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".is_signed": { + "ms_api": [ + ".is_signed", + {} + ], + "pt_api": [ + ".is_signed", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".isneginf": { + "ms_api": [ + ".isneginf", + {} + ], + "pt_api": [ + ".isneginf", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".isposinf": { + "ms_api": [ + ".isposinf", + {} + ], + "pt_api": [ + ".isposinf", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".isreal": { + "ms_api": [ + ".isreal", + {} + ], + "pt_api": [ + ".isreal", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".item": { + 
"ms_api": [ + ".item", + { + "index": null + } + ], + "pt_api": [ + ".item", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".lcm": { + "ms_api": [ + ".lcm", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".lcm", + { + "other": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".ldexp": { + "ms_api": [ + ".ldexp", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".ldexp", + { + "other": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".le": { + "ms_api": [ + ".le", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".le", + { + "other": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".lerp": { + "ms_api": [ + ".lerp", + { + "end": "REQUIRED", + "weight": "REQUIRED" + } + ], + "pt_api": [ + ".lerp", + { + "end": "REQUIRED", + "weight": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".less": { + "ms_api": [ + ".less", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".less", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".less_equal": { + "ms_api": [ + ".less_equal", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".less_equal", + { + "other": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".log": { + "ms_api": [ + ".log", + {} + ], + "pt_api": [ + ".log", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".log_normal": { + "ms_api": [ + ".log_normal", + { + "mean": 1.0, + "std": 2.0 + } + ], + "pt_api": [ + ".log_normal", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".log10": { + "ms_api": [ + ".log10", + {} + ], + "pt_api": [ + ".log10", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".log1p": { + "ms_api": [ + ".log1p", + {} + ], + "pt_api": [ + ".log1p", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".log2": { + "ms_api": [ + ".log2", + {} + ], + "pt_api": [ + ".log2", + {} + 
], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".logaddexp": { + "ms_api": [ + ".logaddexp", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".logaddexp", + { + "other": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".logaddexp2": { + "ms_api": [ + ".logaddexp2", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".logaddexp2", + { + "other": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".logcumsumexp": { + "ms_api": [ + ".logcumsumexp", + { + "axis": "REQUIRED" + } + ], + "pt_api": [ + ".logcumsumexp", + { + "dim": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".logdet": { + "ms_api": [ + ".logdet", + {} + ], + "pt_api": [ + ".logdet", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".logical_and": { + "ms_api": [ + ".logical_and", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".logical_and", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".logical_not": { + "ms_api": [ + ".logical_not", + {} + ], + "pt_api": [ + ".logical_not", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".logical_or": { + "ms_api": [ + ".logical_or", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".logical_or", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".logical_xor": { + "ms_api": [ + ".logical_xor", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".logical_xor", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".logit": { + "ms_api": [ + ".logit", + { + "eps": null + } + ], + "pt_api": [ + ".logit", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".logsumexp": { + "ms_api": [ + ".logsumexp", + { + "axis": "REQUIRED", + "keepdims": false + } + ], + "pt_api": [ + ".logsumexp", + { + "dim": "REQUIRED", + "keepdim": false + } + ], + "ms2pt_mapping": { + "axis": "dim", + "keepdims": "keepdim" + }, + "gen_explicit_map": null + }, + ".long": { + 
"ms_api": [ + ".long", + {} + ], + "pt_api": [ + ".long", + { + "memory_format": "torch.preserve_format" + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".lt": { + "ms_api": [ + ".lt", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".lt", + { + "other": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".lu_solve": { + "ms_api": [ + ".lu_solve", + { + "LU_data": "REQUIRED", + "LU_pivots": "REQUIRED" + } + ], + "pt_api": [ + ".lu_solve", + { + "LU_data": "REQUIRED", + "LU_pivots": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".masked_fill": { + "ms_api": [ + ".masked_fill", + { + "mask": "REQUIRED", + "value": "REQUIRED" + } + ], + "pt_api": [ + ".masked_fill", + { + "mask": "REQUIRED", + "value": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".masked_scatter": { + "ms_api": [ + ".masked_scatter", + { + "mask": "REQUIRED", + "x": "REQUIRED" + } + ], + "pt_api": [ + ".masked_scatter", + { + "mask": "REQUIRED", + "tensor": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".masked_select": { + "ms_api": [ + ".masked_select", + { + "mask": "REQUIRED" + } + ], + "pt_api": [ + ".masked_select", + { + "mask": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".matmul": { + "ms_api": [ + ".matmul", + { + "tensor2": "REQUIRED" + } + ], + "pt_api": [ + ".matmul", + { + "tensor2": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".max": { + "ms_api": [ + ".max", + { + "axis": null, + "keepdims": false, + "initial": null, + "where": true, + "return_indices": false + } + ], + "pt_api": [ + ".max", + { + "dim": null, + "keepdim": false + } + ], + "ms2pt_mapping": { + "axis": "dim", + "keepdims": "keepdim" + }, + "gen_explicit_map": null + }, + ".maximum": { + "ms_api": [ + ".maximum", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".maximum", + { + "other": 
"REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".mean": { + "ms_api": [ + ".mean", + { + "axis": null, + "keep_dims": false + } + ], + "pt_api": [ + ".mean", + { + "dim": null, + "keepdim": false, + "dtype": null + } + ], + "ms2pt_mapping": { + "axis": "dim", + "keep_dims": "keepdim" + }, + "gen_explicit_map": null + }, + ".median": { + "ms_api": [ + ".median", + { + "axis": "- 1", + "keepdims": false + } + ], + "pt_api": [ + ".median", + { + "dim": null, + "keepdim": false + } + ], + "ms2pt_mapping": { + "axis": "dim", + "keepdims": "keepdim" + }, + "gen_explicit_map": null + }, + ".mH": { + "ms_api": [ + ".mH", + {} + ], + "pt_api": [ + ".mH", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".min": { + "ms_api": [ + ".min", + { + "axis": null, + "keepdims": false, + "initial": null, + "where": true, + "return_indices": false + } + ], + "pt_api": [ + ".min", + { + "dim": null, + "keepdim": false + } + ], + "ms2pt_mapping": { + "axis": "dim", + "keepdims": "keepdim" + }, + "gen_explicit_map": null + }, + ".minimum": { + "ms_api": [ + ".minimum", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".minimum", + { + "other": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".mm": { + "ms_api": [ + ".mm", + { + "mat2": "REQUIRED" + } + ], + "pt_api": [ + ".mm", + { + "mat2": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".moveaxis": { + "ms_api": [ + ".moveaxis", + { + "source": "REQUIRED", + "destination": "REQUIRED" + } + ], + "pt_api": [ + ".moveaxis", + { + "source": "REQUIRED", + "destination": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".movedim": { + "ms_api": [ + ".movedim", + { + "source": "REQUIRED", + "destination": "REQUIRED" + } + ], + "pt_api": [ + ".movedim", + { + "source": "REQUIRED", + "destination": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".msort": { + 
"ms_api": [ + ".msort", + {} + ], + "pt_api": [ + ".msort", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".mT": { + "ms_api": [ + ".mT", + {} + ], + "pt_api": [ + ".mT", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".mul": { + "ms_api": [ + ".mul", + { + "value": "REQUIRED" + } + ], + "pt_api": [ + ".mul", + { + "value": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".multinomial": { + "ms_api": [ + ".multinomial", + { + "num_samples": "REQUIRED", + "replacement": true, + "seed": null + } + ], + "pt_api": [ + ".multinomial", + { + "num_samples": "REQUIRED", + "replacement": false, + "generator": null + } + ], + "ms2pt_mapping": { + "num_samples": "num_samples", + "replacement": "replacement", + "seed": "generator" + }, + "gen_explicit_map": null + }, + ".multiply": { + "ms_api": [ + ".multiply", + { + "value": "REQUIRED" + } + ], + "pt_api": [ + ".multiply", + { + "value": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".mvlgamma": { + "ms_api": [ + ".mvlgamma", + { + "p": "REQUIRED" + } + ], + "pt_api": [ + ".mvlgamma", + { + "p": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".nan_to_num": { + "ms_api": [ + ".nan_to_num", + { + "nan": 0.0, + "posinf": null, + "neginf": null + } + ], + "pt_api": [ + ".nan_to_num", + { + "nan": 0.0, + "posinf": null, + "neginf": null + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".nanmedian": { + "ms_api": [ + ".nanmedian", + { + "axis": "- 1", + "keepdims": false + } + ], + "pt_api": [ + ".nanmedian", + { + "dim": null, + "keepdim": false + } + ], + "ms2pt_mapping": { + "axis": "dim", + "keepdims": "keepdim" + }, + "gen_explicit_map": null + }, + ".nansum": { + "ms_api": [ + ".nansum", + { + "axis": null, + "keepdims": false, + "dtype": null + } + ], + "pt_api": [ + ".nansum", + { + "dim": null, + "keepdim": false, + "dtype": null + } + ], + "ms2pt_mapping": { + 
"axis": "dim", + "keepdims": "keepdim", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + ".narrow": { + "ms_api": [ + ".narrow", + { + "axis": "REQUIRED", + "start": "REQUIRED", + "length": "REQUIRED" + } + ], + "pt_api": [ + ".narrow", + { + "dimension": "REQUIRED", + "start": "REQUIRED", + "length": "REQUIRED" + } + ], + "ms2pt_mapping": { + "axis": "dimension", + "start": "start", + "length": "length" + }, + "gen_explicit_map": null + }, + ".ndim": { + "ms_api": [ + ".ndim", + {} + ], + "pt_api": [ + ".ndim", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".ndimension": { + "ms_api": [ + ".ndimension", + {} + ], + "pt_api": [ + ".ndimension", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".ne": { + "ms_api": [ + ".ne", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".ne", + { + "other": "REQUIRED" + } + ], + "ms2pt_mapping": { + "other": "other" + }, + "gen_explicit_map": null + }, + ".neg": { + "ms_api": [ + ".neg", + {} + ], + "pt_api": [ + ".neg", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".negative": { + "ms_api": [ + ".negative", + {} + ], + "pt_api": [ + ".negative", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".nelement": { + "ms_api": [ + ".nelement", + {} + ], + "pt_api": [ + ".nelement", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".new_ones": { + "ms_api": [ + ".new_ones", + { + "size": "REQUIRED", + "dtype": null + } + ], + "pt_api": [ + ".new_ones", + { + "size": "REQUIRED", + "dtype": null, + "device": null, + "requires_grad": false, + "layout": "torch.strided", + "pin_memory": false + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".new_zeros": { + "ms_api": [ + ".new_zeros", + { + "size": "REQUIRED", + "dtype": null + } + ], + "pt_api": [ + ".new_zeros", + { + "size": "REQUIRED", + "dtype": null, + "device": null, + "requires_grad": false, + "layout": "torch.strided", + "pin_memory": false + } + ], + 
"ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".nextafter": { + "ms_api": [ + ".nextafter", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".nextafter", + { + "other": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".norm": { + "ms_api": [ + ".norm", + { + "ord": null, + "dim": null, + "keepdim": false, + "dtype": null + } + ], + "pt_api": [ + ".norm", + { + "p": "'fro'", + "dim": null, + "keepdim": false, + "dtype": null + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".nonzero": { + "ms_api": [ + ".nonzero", + {} + ], + "pt_api": [ + ".nonzero", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".not_equal": { + "ms_api": [ + ".not_equal", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".not_equal", + { + "other": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".numel": { + "ms_api": [ + ".numel", + {} + ], + "pt_api": [ + ".numel", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".numpy": { + "ms_api": [ + ".asnumpy", + {} + ], + "pt_api": [ + ".numpy", + { + "force": false + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".orgqr": { + "ms_api": [ + ".orgqr", + { + "input2": "REQUIRED" + } + ], + "pt_api": [ + ".orgqr", + { + "input2": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".ormqr": { + "ms_api": [ + ".ormqr", + { + "input2": "REQUIRED", + "input3": "REQUIRED", + "left": true, + "transpose": false + } + ], + "pt_api": [ + ".ormqr", + { + "input2": "REQUIRED", + "input3": "REQUIRED", + "left": true, + "transpose": false + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".outer": { + "ms_api": [ + ".outer", + { + "vec2": "REQUIRED" + } + ], + "pt_api": [ + ".outer", + { + "vec2": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".permute": { + "ms_api": [ + ".permute", + { + "*axis": "REQUIRED" + } + ], + "pt_api": [ 
+ ".permute", + { + "*dims": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".pow": { + "ms_api": [ + ".pow", + { + "exponent": "REQUIRED" + } + ], + "pt_api": [ + ".pow", + { + "exponent": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".prod": { + "ms_api": [ + ".prod", + { + "axis": null, + "keep_dims": false + } + ], + "pt_api": [ + ".prod", + { + "dim": null, + "keepdim": false, + "dtype": null + } + ], + "ms2pt_mapping": { + "axis": "dim", + "keep_dims": "keepdim" + }, + "gen_explicit_map": null + }, + ".ravel": { + "ms_api": [ + ".ravel", + {} + ], + "pt_api": [ + ".ravel", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".real": { + "ms_api": [ + ".real", + {} + ], + "pt_api": [ + ".real", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".reciprocal": { + "ms_api": [ + ".reciprocal", + {} + ], + "pt_api": [ + ".reciprocal", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".remainder": { + "ms_api": [ + ".remainder", + { + "divisor": "REQUIRED" + } + ], + "pt_api": [ + ".remainder", + { + "divisor": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".renorm": { + "ms_api": [ + ".renorm", + { + "p": "REQUIRED", + "axis": "REQUIRED", + "maxnorm": "REQUIRED" + } + ], + "pt_api": [ + ".renorm", + { + "p": "REQUIRED", + "dim": "REQUIRED", + "maxnorm": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".rad2deg": { + "ms_api": [ + ".rad2deg", + {} + ], + "pt_api": [ + ".rad2deg", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".repeat": { + "ms_api": [ + ".tile", + { + "reps": "REQUIRED" + } + ], + "pt_api": [ + ".repeat", + { + "*sizes": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".repeat_interleave": { + "ms_api": [ + ".repeat_interleave", + { + "repeats": "REQUIRED", + "dim": null + } + ], + "pt_api": [ + 
".repeat_interleave", + { + "repeats": "REQUIRED", + "dim": null, + "output_size": null + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".reshape": { + "ms_api": [ + ".reshape", + { + "*shape": "REQUIRED" + } + ], + "pt_api": [ + ".reshape", + { + "*shape": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".reshape_as": { + "ms_api": [ + ".reshape_as", + { + "other": "REQUIRED" + } + ], + "pt_api": [ + ".reshape_as", + { + "other": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".round": { + "ms_api": [ + ".round", + {} + ], + "pt_api": [ + ".round", + { + "decimals": 0.0 + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".roll": { + "ms_api": [ + ".roll", + { + "shifts": "REQUIRED", + "dims": "REQUIRED" + } + ], + "pt_api": [ + ".roll", + { + "shifts": "REQUIRED", + "dims": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".rot90": { + "ms_api": [ + ".rot90", + { + "k": "REQUIRED", + "dims": "REQUIRED" + } + ], + "pt_api": [ + ".rot90", + { + "k": "REQUIRED", + "dims": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".rsqrt": { + "ms_api": [ + ".rsqrt", + {} + ], + "pt_api": [ + ".rsqrt", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".sum_to_size": { + "ms_api": [ + ".sum_to_size", + { + "*size": "REQUIRED" + } + ], + "pt_api": [ + ".sum_to_size", + { + "*size": "REQUIRED" + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".scatter": { + "ms_api": [ + ".scatter", + { + "axis": "REQUIRED", + "index": "REQUIRED", + "src": "REQUIRED" + } + ], + "pt_api": [ + ".scatter", + { + "dim": "REQUIRED", + "index": "REQUIRED", + "src": "REQUIRED" + } + ], + "ms2pt_mapping": { + "axis": "dim", + "index": "index", + "src": "src" + }, + "gen_explicit_map": null + }, + ".sgn": { + "ms_api": [ + ".sgn", + {} + ], + "pt_api": [ + ".sgn", + {} + ], + "ms2pt_mapping": {}, + 
"gen_explicit_map": null + }, + ".short": { + "ms_api": [ + ".short", + {} + ], + "pt_api": [ + ".short", + { + "memory_format": "torch.preserve_format" + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".sigmoid": { + "ms_api": [ + ".sigmoid", + {} + ], + "pt_api": [ + ".sigmoid", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".sign": { + "ms_api": [ + ".sign", + {} + ], + "pt_api": [ + ".sign", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".signbit": { + "ms_api": [ + ".signbit", + {} + ], + "pt_api": [ + ".signbit", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".sin": { + "ms_api": [ + ".sin", + {} + ], + "pt_api": [ + ".sin", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".sinc": { + "ms_api": [ + ".sinc", + {} + ], + "pt_api": [ + ".sinc", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".sinh": { + "ms_api": [ + ".sinh", + {} + ], + "pt_api": [ + ".sinh", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".size": { + "ms_api": [ + ".shape", + {} + ], + "pt_api": [ + ".size", + { + "dim": null + } + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".slogdet": { + "ms_api": [ + ".slogdet", + {} + ], + "pt_api": [ + ".slogdet", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".sort": { + "ms_api": [ + ".sort", + { + "axis": "- 1", + "descending": false + } + ], + "pt_api": [ + ".sort", + { + "dim": -1.0, + "descending": false + } + ], + "ms2pt_mapping": { + "axis": "dim", + "descending": "descending" + }, + "gen_explicit_map": null + }, + ".split": { + "ms_api": [ + ".split", + { + "split_size_or_sections": "REQUIRED", + "axis": 0.0 + } + ], + "pt_api": [ + ".split", + { + "split_size": "REQUIRED", + "dim": 0.0 + } + ], + "ms2pt_mapping": { + "split_size_or_sections": "split_size", + "axis": "dim" + }, + "gen_explicit_map": null + }, + ".sqrt": { + "ms_api": [ + ".sqrt", + {} + ], + "pt_api": [ + 
".sqrt", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".square": { + "ms_api": [ + ".square", + {} + ], + "pt_api": [ + ".square", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + ".squeeze": { + "ms_api": [ + ".squeeze", + { + "axis": null + } + ], + "pt_api": [ + ".squeeze", + { + "dim": null + } + ], + "ms2pt_mapping": { + "axis": "dim" + }, + "gen_explicit_map": null + }, + ".std": { + "ms_api": [ + ".std", + { + "axis": null, + "ddof": 0.0, + "keepdims": false + } + ], + "pt_api": [ + ".std", + { + "dim": null, + "correction": 1.0, + "keepdim": false + } + ], + "ms2pt_mapping": { + "axis": "dim", + "keepdims": "keepdim" + }, + "gen_explicit_map": null + }, + ".sub": { + "ms_api": [ + ".subtract", + { + "other": "REQUIRED", + "alpha": 1.0 + } + ], + "pt_api": [ + ".sub", + { + "other": "REQUIRED", + "alpha": 1.0 + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".subtract": { + "ms_api": [ + ".subtract", + { + "other": "REQUIRED", + "alpha": 1.0 + } + ], + "pt_api": [ + ".subtract", + { + "other": "REQUIRED", + "alpha": 1.0 + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + ".sum": { + "ms_api": [ + ".sum", + { + "axis": null, + "dtype": null, + "keepdims": false, + "initial": null + } + ], + "pt_api": [ + ".sum", + { + "dim": null, + "keepdim": false, + "dtype": null + } + ], + "ms2pt_mapping": { + "axis": "dim", + "dtype": "dtype" + }, + "gen_explicit_map": null + } +} \ No newline at end of file diff --git a/mindconverter/mindconverter/mappings/torch_dot_mappings.json b/mindconverter/mindconverter/mappings/torch_dot_mappings.json new file mode 100644 index 0000000000000000000000000000000000000000..a69cb1a482bb034b3c460e7fb437ea3c22d4183a --- /dev/null +++ b/mindconverter/mindconverter/mappings/torch_dot_mappings.json @@ -0,0 +1,6164 @@ +{ + "torch.abs": { + "ms_api": [ + "ops.abs", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.abs", + { + "input": "REQUIRED", + "out": 
null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.absolute": { + "ms_api": [ + "ops.absolute", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.absolute", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.acos": { + "ms_api": [ + "ops.acos", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.acos", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.acosh": { + "ms_api": [ + "ops.acosh", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.acosh", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.add": { + "ms_api": [ + "ops.add", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.add", + { + "input": "REQUIRED", + "other": "REQUIRED", + "alpha": 1.0, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.addbmm": { + "ms_api": [ + "ops.addbmm", + { + "input": "REQUIRED", + "batch1": "REQUIRED", + "batch2": "REQUIRED", + "beta": 1.0, + "alpha": 1.0 + } + ], + "pt_api": [ + "torch.addbmm", + { + "input": "REQUIRED", + "batch1": "REQUIRED", + "batch2": "REQUIRED", + "beta": 1.0, + "alpha": 1.0, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "batch1": "batch1", + "batch2": "batch2", + "beta": "beta", + "alpha": "alpha" + }, + "gen_explicit_map": null + }, + "torch.addcdiv": { + "ms_api": [ + "ops.addcdiv", + { + "input": "REQUIRED", + "tensor1": "REQUIRED", + "tensor2": "REQUIRED", + "value": 1.0 + } + ], + "pt_api": [ + "torch.addcdiv", + { + "input": "REQUIRED", + "tensor1": "REQUIRED", + "tensor2": "REQUIRED", + "value": 1.0, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "tensor1": "tensor1", + 
"tensor2": "tensor2", + "value": "value" + }, + "gen_explicit_map": null + }, + "torch.addcmul": { + "ms_api": [ + "ops.addcmul", + { + "input": "REQUIRED", + "tensor1": "REQUIRED", + "tensor2": "REQUIRED", + "value": 1.0 + } + ], + "pt_api": [ + "torch.addcmul", + { + "input": "REQUIRED", + "tensor1": "REQUIRED", + "tensor2": "REQUIRED", + "value": 1.0, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "tensor1": "tensor1", + "tensor2": "tensor2", + "value": "value" + }, + "gen_explicit_map": null + }, + "torch.addmm": { + "ms_api": [ + "ops.addmm", + { + "input": "REQUIRED", + "mat1": "REQUIRED", + "mat2": "REQUIRED", + "beta": 1.0, + "alpha": 1.0 + } + ], + "pt_api": [ + "torch.addmm", + { + "input": "REQUIRED", + "mat1": "REQUIRED", + "mat2": "REQUIRED", + "beta": 1.0, + "alpha": 1.0, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "mat1": "mat1", + "mat2": "mat2", + "beta": "beta", + "alpha": "alpha" + }, + "gen_explicit_map": null + }, + "torch.addmv": { + "ms_api": [ + "ops.addmv", + { + "input": "REQUIRED", + "mat": "REQUIRED", + "vec": "REQUIRED", + "beta": 1.0, + "alpha": 1.0 + } + ], + "pt_api": [ + "torch.addmv", + { + "input": "REQUIRED", + "mat": "REQUIRED", + "vec": "REQUIRED", + "beta": 1.0, + "alpha": 1.0, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "mat": "mat", + "vec": "vec", + "beta": "beta", + "alpha": "alpha" + }, + "gen_explicit_map": null + }, + "torch.addr": { + "ms_api": [ + "ops.addr", + { + "x": "REQUIRED", + "vec1": "REQUIRED", + "vec2": "REQUIRED", + "beta": 1.0, + "alpha": 1.0 + } + ], + "pt_api": [ + "torch.addr", + { + "input": "REQUIRED", + "vec1": "REQUIRED", + "vec2": "REQUIRED", + "beta": 1.0, + "alpha": 1.0, + "out": null + } + ], + "ms2pt_mapping": { + "x": "input", + "vec1": "vec1", + "vec2": "vec2", + "beta": "beta", + "alpha": "alpha" + }, + "gen_explicit_map": null + }, + "torch.all": { + "ms_api": [ + "ops.all", + { + "input": "REQUIRED", + "axis": null, + 
"keep_dims": false + } + ], + "pt_api": [ + "torch.all", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.amax": { + "ms_api": [ + "ops.amax", + { + "input": "REQUIRED", + "axis": null, + "keepdims": false, + "initial": null, + "where": null + } + ], + "pt_api": [ + "torch.amax", + { + "input": "REQUIRED", + "dim": "REQUIRED", + "keepdim": false, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "axis": "dim", + "keepdims": "keepdim" + }, + "gen_explicit_map": null + }, + "torch.amin": { + "ms_api": [ + "ops.amin", + { + "input": "REQUIRED", + "axis": null, + "keepdims": false, + "initial": null, + "where": null + } + ], + "pt_api": [ + "torch.amin", + { + "input": "REQUIRED", + "dim": "REQUIRED", + "keepdim": false, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "axis": "dim", + "keepdims": "keepdim" + }, + "gen_explicit_map": null + }, + "torch.angle": { + "ms_api": [ + "ops.angle", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.angle", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.any": { + "ms_api": [ + "ops.any", + { + "input": "REQUIRED", + "axis": null, + "keep_dims": false + } + ], + "pt_api": [ + "torch.any", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.arange": { + "ms_api": [ + "ops.arange", + { + "start": 0.0, + "end": null, + "step": 1.0, + "dtype": null + } + ], + "pt_api": [ + "torch.arange", + { + "start": 0.0, + "end": "REQUIRED", + "step": 1.0, + "out": null, + "dtype": null, + "layout": "torch.strided", + "device": null, + "requires_grad": false + } + ], + "ms2pt_mapping": { + "start": "start", + "end": "end", + "step": "step" + }, + "gen_explicit_map": null + }, + "torch.arccos": { + "ms_api": [ + "ops.arccos", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + 
"torch.arccos", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.arccosh": { + "ms_api": [ + "ops.arccosh", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.arccosh", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.arcsin": { + "ms_api": [ + "ops.arcsin", + { + "x": "REQUIRED" + } + ], + "pt_api": [ + "torch.arcsin", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "x": "input" + }, + "gen_explicit_map": null + }, + "torch.arcsinh": { + "ms_api": [ + "ops.arcsinh", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.arcsinh", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.arctan": { + "ms_api": [ + "ops.arctan", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.arctan", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.arctanh": { + "ms_api": [ + "ops.arctanh", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.arctanh", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.argmax": { + "ms_api": [ + "ops.argmax", + { + "input": "REQUIRED", + "dim": null, + "keepdim": false + } + ], + "pt_api": [ + "torch.argmax", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.argmin": { + "ms_api": [ + "ops.argmin", + { + "input": "REQUIRED", + "axis": null, + "keepdims": false + } + ], + "pt_api": [ + "torch.argmin", + { + "input": "REQUIRED", + "dim": null, + "keepdim": false + } + ], + "ms2pt_mapping": { + "input": "input", + "axis": "dim", + "keepdims": "keepdim" + }, + "gen_explicit_map": null + }, + "torch.argsort": { + 
"ms_api": [ + "ops.argsort", + { + "input": "REQUIRED", + "axis": -1, + "descending": false + } + ], + "pt_api": [ + "torch.argsort", + { + "input": "REQUIRED", + "dim": -1.0, + "descending": false, + "stable": false + } + ], + "ms2pt_mapping": { + "input": "input", + "axis": "dim", + "descending": "descending" + }, + "gen_explicit_map": null + }, + "torch.asin": { + "ms_api": [ + "ops.asin", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.asin", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.asinh": { + "ms_api": [ + "ops.asinh", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.asinh", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.atan": { + "ms_api": [ + "ops.atan", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.atan", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.atan2": { + "ms_api": [ + "ops.atan2", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.atan2", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.atanh": { + "ms_api": [ + "ops.atanh", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.atanh", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.atleast_1d": { + "ms_api": [ + "ops.atleast_1d", + { + "inputs": "REQUIRED" + } + ], + "pt_api": [ + "torch.atleast_1d", + { + "*tensors": "REQUIRED" + } + ], + "ms2pt_mapping": { + "inputs": "*tensors" + }, + "gen_explicit_map": null + }, + "torch.atleast_2d": { + "ms_api": [ + "ops.atleast_2d", + { + "inputs": "REQUIRED" + } + ], + "pt_api": [ + "torch.atleast_2d", + { + 
"*tensors": "REQUIRED" + } + ], + "ms2pt_mapping": { + "inputs": "*tensors" + }, + "gen_explicit_map": null + }, + "torch.atleast_3d": { + "ms_api": [ + "ops.atleast_3d", + { + "inputs": "REQUIRED" + } + ], + "pt_api": [ + "torch.atleast_3d", + { + "*tensors": "REQUIRED" + } + ], + "ms2pt_mapping": { + "inputs": "*tensors" + }, + "gen_explicit_map": null + }, + "torch.baddbmm": { + "ms_api": [ + "ops.baddbmm", + { + "input": "REQUIRED", + "batch1": "REQUIRED", + "batch2": "REQUIRED", + "beta": 1.0, + "alpha": 1.0 + } + ], + "pt_api": [ + "torch.baddbmm", + { + "input": "REQUIRED", + "batch1": "REQUIRED", + "batch2": "REQUIRED", + "beta": 1.0, + "alpha": 1.0, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "batch1": "batch1", + "batch2": "batch2", + "beta": "beta", + "alpha": "alpha" + }, + "gen_explicit_map": null + }, + "torch.bartlett_window": { + "ms_api": [ + "ops.bartlett_window", + { + "window_length": "REQUIRED", + "periodic": true, + "dtype": null + } + ], + "pt_api": [ + "torch.bartlett_window", + { + "window_length": "REQUIRED", + "periodic": true, + "dtype": null, + "layout": "torch.strided", + "device": null, + "requires_grad": false + } + ], + "ms2pt_mapping": { + "window_length": "window_length", + "periodic": "periodic", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "torch.bernoulli": { + "ms_api": [ + "ops.bernoulli", + { + "input": "REQUIRED", + "p": 0.5, + "seed": null + } + ], + "pt_api": [ + "torch.bernoulli", + { + "input": "REQUIRED", + "generator": null, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.bincount": { + "ms_api": [ + "ops.bincount", + { + "input": "REQUIRED", + "weights": null, + "minlength": 0.0 + } + ], + "pt_api": [ + "torch.bincount", + { + "input": "REQUIRED", + "weights": null, + "minlength": 0.0 + } + ], + "ms2pt_mapping": { + "input": "input", + "weights": "weights", + "minlength": "minlength" + }, + "gen_explicit_map": null + }, + 
"torch.bitwise_and": { + "ms_api": [ + "ops.bitwise_and", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.bitwise_and", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.bitwise_or": { + "ms_api": [ + "ops.bitwise_or", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.bitwise_or", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.bitwise_xor": { + "ms_api": [ + "ops.bitwise_xor", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.bitwise_xor", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.blackman_window": { + "ms_api": [ + "ops.blackman_window", + { + "window_length": "REQUIRED", + "periodic": true, + "dtype": null + } + ], + "pt_api": [ + "torch.blackman_window", + { + "window_length": "REQUIRED", + "periodic": true, + "dtype": null, + "layout": "torch.strided", + "device": null, + "requires_grad": false + } + ], + "ms2pt_mapping": { + "window_length": "window_length", + "periodic": "periodic", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "torch.block_diag": { + "ms_api": [ + "ops.block_diag", + { + "*inputs": "REQUIRED" + } + ], + "pt_api": [ + "torch.block_diag", + { + "*tensors": "REQUIRED" + } + ], + "ms2pt_mapping": { + "*inputs": "*tensors" + }, + "gen_explicit_map": null + }, + "torch.bmm": { + "ms_api": [ + "ops.bmm", + { + "input_x": "REQUIRED", + "mat2": "REQUIRED" + } + ], + "pt_api": [ + "torch.bmm", + { + "input": "REQUIRED", + "mat2": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input_x": "input", + "mat2": "mat2" + }, + "gen_explicit_map": null + 
}, + "torch.bucketize": { + "ms_api": [ + "ops.bucketize", + { + "input": "REQUIRED", + "boundaries": "REQUIRED", + "right": false + } + ], + "pt_api": [ + "torch.bucketize", + { + "input": "REQUIRED", + "boundaries": "REQUIRED", + "out_int32": false, + "right": false, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "boundaries": "boundaries", + "right": "right" + }, + "gen_explicit_map": null + }, + "torch.broadcast_to": { + "ms_api": [ + "ops.broadcast_to", + { + "input": "REQUIRED", + "shape": "REQUIRED" + } + ], + "pt_api": [ + "torch.broadcast_to", + { + "input": "REQUIRED", + "shape": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input", + "shape": "shape" + }, + "gen_explicit_map": null + }, + "torch.cat": { + "ms_api": [ + "ops.cat", + { + "tensors": "REQUIRED", + "axis": 0.0 + } + ], + "pt_api": [ + "torch.cat", + { + "tensors": "REQUIRED", + "dim": 0.0, + "out": null + } + ], + "ms2pt_mapping": { + "tensors": "tensors", + "axis": "dim" + }, + "gen_explicit_map": null + }, + "torch.cdist": { + "ms_api": [ + "ops.cdist", + { + "x1": "REQUIRED", + "x2": "REQUIRED", + "p": 2.0 + } + ], + "pt_api": [ + "torch.cdist", + { + "x1": "REQUIRED", + "x2": "REQUIRED", + "p": 2.0, + "compute_mode": "use_mm_for_euclid_dist_if_necessary" + } + ], + "ms2pt_mapping": { + "x1": "x1", + "x2": "x2", + "p": "p" + }, + "gen_explicit_map": null + }, + "torch.ceil": { + "ms_api": [ + "ops.ceil", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.ceil", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.cholesky": { + "ms_api": [ + "ops.cholesky", + { + "input_x": "REQUIRED", + "upper": false + } + ], + "pt_api": [ + "torch.cholesky", + { + "input": "REQUIRED", + "upper": false, + "out": null + } + ], + "ms2pt_mapping": { + "input_x": "input", + "upper": "upper" + }, + "gen_explicit_map": null + }, + "torch.cholesky_solve": { + "ms_api": [ + "ops.cholesky_solve", + 
{ + "input": "REQUIRED", + "input2": "REQUIRED", + "upper": false + } + ], + "pt_api": [ + "torch.cholesky_solve", + { + "input": "REQUIRED", + "input2": "REQUIRED", + "upper": false, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "input2": "input2", + "upper": "upper" + }, + "gen_explicit_map": null + }, + "torch.chunk": { + "ms_api": [ + "ops.chunk", + { + "input": "REQUIRED", + "chunks": "REQUIRED", + "axis": 0.0 + } + ], + "pt_api": [ + "torch.chunk", + { + "input": "REQUIRED", + "chunks": "REQUIRED", + "dim": 0.0 + } + ], + "ms2pt_mapping": { + "input": "input", + "chunks": "chunks", + "axis": "dim" + }, + "gen_explicit_map": null + }, + "torch.clamp": { + "ms_api": [ + "ops.clamp", + { + "input": "REQUIRED", + "min": null, + "max": null + } + ], + "pt_api": [ + "torch.clamp", + { + "input": "REQUIRED", + "min": null, + "max": null, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "min": "min", + "max": "max" + }, + "gen_explicit_map": null + }, + "torch.clip": { + "ms_api": [ + "ops.clip", + { + "input": "REQUIRED", + "min": null, + "max": null + } + ], + "pt_api": [ + "torch.clip", + { + "input": "REQUIRED", + "min": null, + "max": null, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "min": "min", + "max": "max" + }, + "gen_explicit_map": null + }, + "torch.column_stack": { + "ms_api": [ + "ops.column_stack", + { + "tensors": "REQUIRED" + } + ], + "pt_api": [ + "torch.column_stack", + { + "tensors": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "tensors": "tensors" + }, + "gen_explicit_map": null + }, + "torch.combinations": { + "ms_api": [ + "ops.combinations", + { + "input": "REQUIRED", + "r": 2.0, + "with_replacement": false + } + ], + "pt_api": [ + "torch.combinations", + { + "input": "REQUIRED", + "r": 2.0, + "with_replacement": false + } + ], + "ms2pt_mapping": { + "input": "input", + "r": "r", + "with_replacement": "with_replacement" + }, + "gen_explicit_map": null + }, + "torch.conj": 
{ + "ms_api": [ + "ops.conj", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.conj", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.copysign": { + "ms_api": [ + "ops.copysign", + { + "x": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.copysign", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "x": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.cos": { + "ms_api": [ + "ops.cos", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.cos", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.cosh": { + "ms_api": [ + "ops.cosh", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.cosh", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.count_nonzero": { + "ms_api": [ + "ops.count_nonzero", + { + "x": "REQUIRED", + "axis": "REQUIRED", + "keep_dims": false, + "dtype": "mstype.int32" + } + ], + "pt_api": [ + "torch.count_nonzero", + { + "input": "REQUIRED", + "dim": null + } + ], + "ms2pt_mapping": { + "x": "input", + "axis": "dim" + }, + "gen_explicit_map": null + }, + "torch.cross": { + "ms_api": [ + "ops.cross", + { + "input": "REQUIRED", + "other": "REQUIRED", + "dim": null + } + ], + "pt_api": [ + "torch.cross", + { + "input": "REQUIRED", + "other": "REQUIRED", + "dim": null, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other", + "dim": "dim" + }, + "gen_explicit_map": null + }, + "torch.cummax": { + "ms_api": [ + "ops.cummax", + { + "input": "REQUIRED", + "axis": "REQUIRED" + } + ], + "pt_api": [ + "torch.cummax", + { + "input": "REQUIRED", + "dim": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "axis": "dim" + }, + "gen_explicit_map": null + }, + 
"torch.cummin": { + "ms_api": [ + "ops.cummin", + { + "input": "REQUIRED", + "axis": "REQUIRED" + } + ], + "pt_api": [ + "torch.cummin", + { + "input": "REQUIRED", + "dim": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "axis": "dim" + }, + "gen_explicit_map": null + }, + "torch.cumprod": { + "ms_api": [ + "ops.cumprod", + { + "input": "REQUIRED", + "dim": "REQUIRED", + "dtype": null + } + ], + "pt_api": [ + "torch.cumprod", + { + "input": "REQUIRED", + "dim": "REQUIRED", + "dtype": null, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "dim": "dim", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "torch.cumsum": { + "ms_api": [ + "ops.cumsum", + { + "x": "REQUIRED", + "axis": "REQUIRED", + "dtype": null + } + ], + "pt_api": [ + "torch.cumsum", + { + "input": "REQUIRED", + "dim": "REQUIRED", + "dtype": null, + "out": null + } + ], + "ms2pt_mapping": { + "x": "input", + "axis": "dim", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "torch.deg2rad": { + "ms_api": [ + "ops.deg2rad", + { + "x": "REQUIRED" + } + ], + "pt_api": [ + "torch.deg2rad", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "x": "input" + }, + "gen_explicit_map": null + }, + "torch.diag": { + "ms_api": [ + "ops.diag", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.diag", + { + "input": "REQUIRED", + "diagonal": 0.0, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.diag_embed": { + "ms_api": [ + "ops.diag_embed", + { + "input": "REQUIRED", + "offset": 0.0, + "dim1": "- 2", + "dim2": -1 + } + ], + "pt_api": [ + "torch.diag_embed", + { + "input": "REQUIRED", + "offset": 0.0, + "dim1": -2.0, + "dim2": -1.0 + } + ], + "ms2pt_mapping": { + "input": "input", + "offset": "offset", + "dim1": "dim1", + "dim2": "dim2" + }, + "gen_explicit_map": null + }, + "torch.diagflat": { + "ms_api": [ + "ops.diagflat", + { + "input": "REQUIRED", + "offset": 0.0 
+ } + ], + "pt_api": [ + "torch.diagflat", + { + "input": "REQUIRED", + "offset": 0.0 + } + ], + "ms2pt_mapping": { + "input": "input", + "offset": "offset" + }, + "gen_explicit_map": null + }, + "torch.diagonal": { + "ms_api": [ + "ops.diagonal", + { + "input": "REQUIRED", + "offset": 0.0, + "dim1": 0.0, + "dim2": 1.0 + } + ], + "pt_api": [ + "torch.diagonal", + { + "input": "REQUIRED", + "offset": 0.0, + "dim1": 0.0, + "dim2": 1.0 + } + ], + "ms2pt_mapping": { + "input": "input", + "offset": "offset", + "dim1": "dim1", + "dim2": "dim2" + }, + "gen_explicit_map": null + }, + "torch.diff": { + "ms_api": [ + "ops.diff", + { + "x": "REQUIRED", + "n": 1.0, + "axis": -1, + "prepend": null, + "append": null + } + ], + "pt_api": [ + "torch.diff", + { + "input": "REQUIRED", + "n": 1.0, + "dim": -1.0, + "prepend": null, + "append": null + } + ], + "ms2pt_mapping": { + "x": "input", + "n": "n", + "axis": "dim", + "prepend": "prepend", + "append": "append" + }, + "gen_explicit_map": null + }, + "torch.digamma": { + "ms_api": [ + "ops.digamma", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.digamma", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.dist": { + "ms_api": [ + "ops.dist", + { + "input": "REQUIRED", + "other": "REQUIRED", + "p": 2.0 + } + ], + "pt_api": [ + "torch.dist", + { + "input": "REQUIRED", + "other": "REQUIRED", + "p": 2.0 + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other", + "p": "p" + }, + "gen_explicit_map": null + }, + "torch.div": { + "ms_api": [ + "ops.div", + { + "input": "REQUIRED", + "other": "REQUIRED", + "rounding_mode": null + } + ], + "pt_api": [ + "torch.div", + { + "input": "REQUIRED", + "other": "REQUIRED", + "rounding_mode": null, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other", + "rounding_mode": "rounding_mode" + }, + "gen_explicit_map": null + }, + "torch.divide": { + "ms_api": [ + 
"ops.divide", + { + "input": "REQUIRED", + "other": "REQUIRED", + "rounding_mode": null + } + ], + "pt_api": [ + "torch.divide", + { + "input": "REQUIRED", + "other": "REQUIRED", + "rounding_mode": null, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other", + "rounding_mode": "rounding_mode" + }, + "gen_explicit_map": null + }, + "torch.dot": { + "ms_api": [ + "ops.tensor_dot", + { + "x1": "REQUIRED", + "x2": "REQUIRED", + "axes": "REQUIRED" + } + ], + "pt_api": [ + "torch.dot", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "x1": "input", + "x2": "other" + }, + "gen_explicit_map": null + }, + "torch.dstack": { + "ms_api": [ + "ops.dstack", + { + "inputs": "REQUIRED" + } + ], + "pt_api": [ + "torch.dstack", + { + "tensors": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "inputs": "tensors" + }, + "gen_explicit_map": null + }, + "torch.einsum": { + "ms_api": [ + "ops.einsum", + { + "equation": "REQUIRED", + "*operands": "REQUIRED" + } + ], + "pt_api": [ + "torch.einsum", + { + "equation": "REQUIRED", + "*operands": "REQUIRED" + } + ], + "ms2pt_mapping": { + "equation": "equation", + "*operands": "*operands" + }, + "gen_explicit_map": null + }, + "torch.eq": { + "ms_api": [ + "ops.equal", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.eq", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.erf": { + "ms_api": [ + "ops.erf", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.erf", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.erfc": { + "ms_api": [ + "ops.erfc", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.erfc", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + 
"gen_explicit_map": null + }, + "torch.erfinv": { + "ms_api": [ + "ops.erfinv", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.erfinv", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.exp": { + "ms_api": [ + "ops.exp", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.exp", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.exp2": { + "ms_api": [ + "ops.exp2", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.exp2", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.expm1": { + "ms_api": [ + "ops.expm1", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.expm1", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.eye": { + "ms_api": [ + "ops.eye", + { + "n": "REQUIRED", + "m": null, + "dtype": null + } + ], + "pt_api": [ + "torch.eye", + { + "n": "REQUIRED", + "m": null, + "out": null, + "dtype": null, + "layout": "torch.strided", + "device": null, + "requires_grad": false + } + ], + "ms2pt_mapping": { + "n": "n", + "m": "m", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "torch.flatten": { + "ms_api": [ + "ops.flatten", + { + "input": "REQUIRED", + "order": "C", + "start_dim": 1.0, + "end_dim": -1 + } + ], + "pt_api": [ + "torch.flatten", + { + "input": "REQUIRED", + "start_dim": 0.0, + "end_dim": -1.0 + } + ], + "ms2pt_mapping": { + "input": "input", + "start_dim": "start_dim", + "end_dim": "end_dim" + }, + "gen_explicit_map": null + }, + "torch.float_power": { + "ms_api": [ + "ops.float_power", + { + "input": "REQUIRED", + "exponent": "REQUIRED" + } + ], + "pt_api": [ + "torch.float_power", + { + "input": "REQUIRED", + "exponent": "REQUIRED", + "out": null + } + ], + 
"ms2pt_mapping": { + "input": "input", + "exponent": "exponent" + }, + "gen_explicit_map": null + }, + "torch.flip": { + "ms_api": [ + "ops.flip", + { + "input": "REQUIRED", + "dims": "REQUIRED" + } + ], + "pt_api": [ + "torch.flip", + { + "input": "REQUIRED", + "dims": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input", + "dims": "dims" + }, + "gen_explicit_map": null + }, + "torch.fliplr": { + "ms_api": [ + "ops.fliplr", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.fliplr", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.flipud": { + "ms_api": [ + "ops.flipud", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.flipud", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.floor": { + "ms_api": [ + "ops.floor", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.floor", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.floor_divide": { + "ms_api": [ + "ops.floor_divide", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.floor_divide", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.fmax": { + "ms_api": [ + "ops.fmax", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.fmax", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.fmod": { + "ms_api": [ + "ops.fmod", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.fmod", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + 
"gen_explicit_map": null + }, + "torch.frac": { + "ms_api": [ + "ops.frac", + { + "x": "REQUIRED" + } + ], + "pt_api": [ + "torch.frac", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "x": "input" + }, + "gen_explicit_map": null + }, + "torch.full": { + "ms_api": [ + "ops.full", + { + "size": "REQUIRED", + "fill_value": "REQUIRED", + "dtype": null + } + ], + "pt_api": [ + "torch.full", + { + "size": "REQUIRED", + "fill_value": "REQUIRED", + "out": null, + "dtype": null, + "layout": "torch.strided", + "device": null, + "requires_grad": false + } + ], + "ms2pt_mapping": { + "size": "size", + "fill_value": "fill_value", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "torch.full_like": { + "ms_api": [ + "ops.full_like", + { + "input": "REQUIRED", + "fill_value": "REQUIRED", + "dtype": null + } + ], + "pt_api": [ + "torch.full_like", + { + "input": "REQUIRED", + "fill_value": "REQUIRED", + "\\*": "REQUIRED", + "dtype": null, + "layout": "torch.strided", + "device": null, + "requires_grad": false, + "memory_format": "torch.preserve_format" + } + ], + "ms2pt_mapping": { + "input": "input", + "fill_value": "fill_value", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "torch.gather": { + "ms_api": [ + "ops.gather_elements", + { + "input": "REQUIRED", + "dim": "REQUIRED", + "index": "REQUIRED" + } + ], + "pt_api": [ + "torch.gather", + { + "input": "REQUIRED", + "dim": "REQUIRED", + "index": "REQUIRED", + "sparse_grad": false, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "dim": "dim", + "index": "index" + }, + "gen_explicit_map": null + }, + "torch.gcd": { + "ms_api": [ + "ops.gcd", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.gcd", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.ge": { + "ms_api": [ + "ops.ge", + { + "input": "REQUIRED", + 
"other": "REQUIRED" + } + ], + "pt_api": [ + "torch.ge", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.geqrf": { + "ms_api": [ + "ops.geqrf", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.geqrf", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.ger": { + "ms_api": [ + "ops.ger", + { + "input": "REQUIRED", + "vec2": "REQUIRED" + } + ], + "pt_api": [ + "torch.ger", + { + "input": "REQUIRED", + "vec2": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "vec2": "vec2" + }, + "gen_explicit_map": null + }, + "torch.greater": { + "ms_api": [ + "ops.greater", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.greater", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.greater_equal": { + "ms_api": [ + "ops.greater_equal", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.greater_equal", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.gt": { + "ms_api": [ + "ops.gt", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.gt", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.hann_window": { + "ms_api": [ + "ops.hann_window", + { + "window_length": "REQUIRED", + "periodic": true, + "dtype": null + } + ], + "pt_api": [ + "torch.hann_window", + { + "window_length": "REQUIRED", + "periodic": true, + "dtype": null, + "layout": "torch.strided", + "device": 
null, + "requires_grad": false + } + ], + "ms2pt_mapping": { + "window_length": "window_length", + "periodic": "periodic", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "torch.heaviside": { + "ms_api": [ + "ops.heaviside", + { + "input": "REQUIRED", + "values": "REQUIRED" + } + ], + "pt_api": [ + "torch.heaviside", + { + "input": "REQUIRED", + "values": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "values": "values" + }, + "gen_explicit_map": null + }, + "torch.hstack": { + "ms_api": [ + "ops.hstack", + { + "tensors": "REQUIRED" + } + ], + "pt_api": [ + "torch.hstack", + { + "tensors": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "tensors": "tensors" + }, + "gen_explicit_map": null + }, + "torch.histc": { + "ms_api": [ + "ops.histc", + { + "input": "REQUIRED", + "bins": 100.0, + "min": 0.0, + "max": 0.0 + } + ], + "pt_api": [ + "torch.histc", + { + "input": "REQUIRED", + "bins": 100.0, + "min": 0.0, + "max": 0.0, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "bins": "bins", + "min": "min", + "max": "max" + }, + "gen_explicit_map": null + }, + "torch.hypot": { + "ms_api": [ + "ops.hypot", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.hypot", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.hamming_window": { + "ms_api": [ + "ops.hamming_window", + { + "window_length": "REQUIRED", + "periodic": true, + "alpha": 0.54, + "beta": 0.46, + "dtype": null + } + ], + "pt_api": [ + "torch.hamming_window", + { + "window_length": "REQUIRED", + "periodic": true, + "alpha": 0.54, + "beta": 0.46, + "dtype": null, + "layout": "torch.strided", + "device": null, + "requires_grad": false + } + ], + "ms2pt_mapping": { + "window_length": "window_length", + "periodic": "periodic", + "alpha": "alpha", + "beta": "beta", + "dtype": "dtype" + }, + 
"gen_explicit_map": null + }, + "torch.i0": { + "ms_api": [ + "ops.i0", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.i0", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.igamma": { + "ms_api": [ + "ops.igamma", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.igamma", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.igammac": { + "ms_api": [ + "ops.igammac", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.igammac", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.imag": { + "ms_api": [ + "ops.imag", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.imag", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.index_select": { + "ms_api": [ + "ops.index_select", + { + "input": "REQUIRED", + "axis": "REQUIRED", + "index": "REQUIRED" + } + ], + "pt_api": [ + "torch.index_select", + { + "input": "REQUIRED", + "dim": "REQUIRED", + "index": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "axis": "dim", + "index": "index" + }, + "gen_explicit_map": null + }, + "torch.inner": { + "ms_api": [ + "ops.inner", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.inner", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.inverse": { + "ms_api": [ + "ops.inverse", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.inverse", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + 
"input": "input" + }, + "gen_explicit_map": null + }, + "torch.isclose": { + "ms_api": [ + "ops.isclose", + { + "input": "REQUIRED", + "other": "REQUIRED", + "rtol": 1e-05, + "atol": 1e-08, + "equal_nan": false + } + ], + "pt_api": [ + "torch.isclose", + { + "input": "REQUIRED", + "other": "REQUIRED", + "rtol": 1e-05, + "atol": 1e-08, + "equal_nan": false + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other", + "rtol": "rtol", + "atol": "atol", + "equal_nan": "equal_nan" + }, + "gen_explicit_map": null + }, + "torch.isfinite": { + "ms_api": [ + "ops.isfinite", + { + "x": "REQUIRED" + } + ], + "pt_api": [ + "torch.isfinite", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "x": "input" + }, + "gen_explicit_map": null + }, + "torch.isinf": { + "ms_api": [ + "ops.isinf", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.isinf", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.isnan": { + "ms_api": [ + "ops.isnan", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.isnan", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.isneginf": { + "ms_api": [ + "ops.isneginf", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.isneginf", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.isposinf": { + "ms_api": [ + "ops.isposinf", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.isposinf", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.isreal": { + "ms_api": [ + "ops.isreal", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.isreal", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.is_complex": { + "ms_api": [ + "ops.is_complex", + { + 
"input": "REQUIRED" + } + ], + "pt_api": [ + "torch.is_complex", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.is_floating_point": { + "ms_api": [ + "ops.is_floating_point", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.is_floating_point", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.is_nonzero": { + "ms_api": [ + "ops.is_nonzero", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.is_nonzero", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.is_tensor": { + "ms_api": [ + "ops.is_tensor", + { + "obj": "REQUIRED" + } + ], + "pt_api": [ + "torch.is_tensor", + { + "obj": "REQUIRED" + } + ], + "ms2pt_mapping": { + "obj": "obj" + }, + "gen_explicit_map": null + }, + "torch.kaiser_window": { + "ms_api": [ + "ops.kaiser_window", + { + "window_length": "REQUIRED", + "periodic": true, + "beta": 12.0, + "dtype": null + } + ], + "pt_api": [ + "torch.kaiser_window", + { + "window_length": "REQUIRED", + "periodic": true, + "beta": 12.0, + "dtype": null, + "layout": "torch.strided", + "device": null, + "requires_grad": false + } + ], + "ms2pt_mapping": { + "window_length": "window_length", + "periodic": "periodic", + "beta": "beta", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "torch.kron": { + "ms_api": [ + "ops.kron", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.kron", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.lcm": { + "ms_api": [ + "ops.lcm", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.lcm", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": 
"other" + }, + "gen_explicit_map": null + }, + "torch.ldexp": { + "ms_api": [ + "ops.ldexp", + { + "x": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.ldexp", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "x": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.le": { + "ms_api": [ + "ops.le", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.le", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.lerp": { + "ms_api": [ + "ops.lerp", + { + "input": "REQUIRED", + "end": "REQUIRED", + "weight": "REQUIRED" + } + ], + "pt_api": [ + "torch.lerp", + { + "input": "REQUIRED", + "end": "REQUIRED", + "weight": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "end": "end", + "weight": "weight" + }, + "gen_explicit_map": null + }, + "torch.less": { + "ms_api": [ + "ops.less", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.less", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.less_equal": { + "ms_api": [ + "ops.less_equal", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.less_equal", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.linalg.cond": { + "ms_api": [ + "ops.cond", + { + "A": "REQUIRED", + "p": null + } + ], + "pt_api": [ + "torch.linalg.cond", + { + "A": "REQUIRED", + "p": null, + "out": null + } + ], + "ms2pt_mapping": { + "A": "A", + "p": "p" + }, + "gen_explicit_map": null + }, + "torch.linalg.eigvals": { + "ms_api": [ + "ops.eigvals", + { + "A": "REQUIRED" + } + ], + 
"pt_api": [ + "torch.linalg.eigvals", + { + "A": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "A": "A" + }, + "gen_explicit_map": null + }, + "torch.linalg.norm": { + "ms_api": [ + "ops.norm", + { + "A": "REQUIRED", + "ord": null, + "dim": null, + "keepdim": false, + "dtype": null + } + ], + "pt_api": [ + "torch.linalg.norm", + { + "A": "REQUIRED", + "ord": null, + "dim": null, + "keepdim": false, + "out": null, + "dtype": null + } + ], + "ms2pt_mapping": { + "A": "A", + "ord": "ord", + "dim": "dim", + "keepdim": "keepdim", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "torch.linspace": { + "ms_api": [ + "ops.linspace", + { + "start": "REQUIRED", + "end": "REQUIRED", + "steps": "REQUIRED" + } + ], + "pt_api": [ + "torch.linspace", + { + "start": "REQUIRED", + "end": "REQUIRED", + "steps": "REQUIRED", + "out": null, + "dtype": null, + "layout": "torch.strided", + "device": null, + "requires_grad": false + } + ], + "ms2pt_mapping": { + "start": "start", + "end": "end", + "steps": "steps" + }, + "gen_explicit_map": null + }, + "torch.log": { + "ms_api": [ + "ops.log", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.log", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.log2": { + "ms_api": [ + "ops.log2", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.log2", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.log10": { + "ms_api": [ + "ops.log10", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.log10", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.logaddexp": { + "ms_api": [ + "ops.logaddexp", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.logaddexp", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], 
+ "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.logaddexp2": { + "ms_api": [ + "ops.logaddexp2", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.logaddexp2", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.logcumsumexp": { + "ms_api": [ + "ops.logcumsumexp", + { + "input": "REQUIRED", + "axis": "REQUIRED" + } + ], + "pt_api": [ + "torch.logcumsumexp", + { + "input": "REQUIRED", + "dim": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "axis": "dim" + }, + "gen_explicit_map": null + }, + "torch.log1p": { + "ms_api": [ + "ops.log1p", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.log1p", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.logdet": { + "ms_api": [ + "ops.logdet", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.logdet", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.logical_and": { + "ms_api": [ + "ops.logical_and", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.logical_and", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.logical_not": { + "ms_api": [ + "ops.logical_not", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.logical_not", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.logical_or": { + "ms_api": [ + "ops.logical_or", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.logical_or", + { + "input": "REQUIRED", + "other": "REQUIRED", + 
"out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.logical_xor": { + "ms_api": [ + "ops.logical_xor", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.logical_xor", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.logit": { + "ms_api": [ + "ops.logit", + { + "input": "REQUIRED", + "eps": null + } + ], + "pt_api": [ + "torch.logit", + { + "input": "REQUIRED", + "eps": null, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "eps": "eps" + }, + "gen_explicit_map": null + }, + "torch.logspace": { + "ms_api": [ + "ops.logspace", + { + "start": "REQUIRED", + "end": "REQUIRED", + "steps": "REQUIRED", + "base": 10.0, + "dtype": "mstype.float32" + } + ], + "pt_api": [ + "torch.logspace", + { + "start": "REQUIRED", + "end": "REQUIRED", + "steps": "REQUIRED", + "base": 10.0, + "out": null, + "dtype": null, + "layout": "torch.strided", + "device": null, + "requires_grad": false + } + ], + "ms2pt_mapping": { + "start": "start", + "end": "end", + "steps": "steps", + "base": "base", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "torch.logsumexp": { + "ms_api": [ + "ops.logsumexp", + { + "input": "REQUIRED", + "axis": "REQUIRED", + "keep_dims": false + } + ], + "pt_api": [ + "torch.logsumexp", + { + "input": "REQUIRED", + "dim": "REQUIRED", + "keepdim": false, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "axis": "dim", + "keep_dims": "keepdim" + }, + "gen_explicit_map": null + }, + "torch.lt": { + "ms_api": [ + "ops.lt", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.lt", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.lu_solve": { + 
"ms_api": [ + "ops.lu_solve", + { + "b": "REQUIRED", + "LU_data": "REQUIRED", + "LU_pivots": "REQUIRED" + } + ], + "pt_api": [ + "torch.lu_solve", + { + "b": "REQUIRED", + "LU_data": "REQUIRED", + "LU_pivots": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "b": "b", + "LU_data": "LU_data", + "LU_pivots": "LU_pivots" + }, + "gen_explicit_map": null + }, + "torch.lu_unpack": { + "ms_api": [ + "ops.lu_unpack", + { + "LU_data": "REQUIRED", + "LU_pivots": "REQUIRED", + "unpack_data": true, + "unpack_pivots": true + } + ], + "pt_api": [ + "torch.lu_unpack", + { + "LU_data": "REQUIRED", + "LU_pivots": "REQUIRED", + "unpack_data": true, + "unpack_pivots": true, + "out": null + } + ], + "ms2pt_mapping": { + "LU_data": "LU_data", + "LU_pivots": "LU_pivots", + "unpack_data": "unpack_data", + "unpack_pivots": "unpack_pivots" + }, + "gen_explicit_map": null + }, + "torch.masked_select": { + "ms_api": [ + "ops.masked_select", + { + "input": "REQUIRED", + "mask": "REQUIRED" + } + ], + "pt_api": [ + "torch.masked_select", + { + "input": "REQUIRED", + "mask": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "mask": "mask" + }, + "gen_explicit_map": null + }, + "torch.matmul": { + "ms_api": [ + "ops.matmul", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.matmul", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.max": { + "ms_api": [ + "ops.max", + { + "input": "REQUIRED", + "axis": null, + "keepdims": false, + "initial": null, + "where": null + } + ], + "pt_api": [ + "torch.max", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.maximum": { + "ms_api": [ + "ops.maximum", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.maximum", + { + "input": "REQUIRED", + "other": "REQUIRED", + 
"out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.mean": { + "ms_api": [ + "ops.mean", + { + "x": "REQUIRED", + "axis": null, + "keep_dims": false + } + ], + "pt_api": [ + "torch.mean", + { + "input": "REQUIRED", + "dtype": null + } + ], + "ms2pt_mapping": { + "x": "input", + "axis": "dtype" + }, + "gen_explicit_map": null + }, + "torch.median": { + "ms_api": [ + "ops.median", + { + "input": "REQUIRED", + "axis": -1, + "keepdims": false + } + ], + "pt_api": [ + "torch.median", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.meshgrid": { + "ms_api": [ + "ops.meshgrid", + { + "*inputs": "REQUIRED", + "indexing": "xy" + } + ], + "pt_api": [ + "torch.meshgrid", + { + "*tensors": "REQUIRED", + "indexing": null + } + ], + "ms2pt_mapping": { + "*inputs": "*tensors", + "indexing": "indexing" + }, + "gen_explicit_map": null + }, + "torch.mm": { + "ms_api": [ + "ops.mm", + { + "input": "REQUIRED", + "mat2": "REQUIRED" + } + ], + "pt_api": [ + "torch.mm", + { + "input": "REQUIRED", + "mat2": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "mat2": "mat2" + }, + "gen_explicit_map": null + }, + "torch.mul": { + "ms_api": [ + "ops.mul", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.mul", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.min": { + "ms_api": [ + "ops.min", + { + "input": "REQUIRED", + "axis": null, + "keepdims": false, + "initial": null, + "where": null + } + ], + "pt_api": [ + "torch.min", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.minimum": { + "ms_api": [ + "ops.minimum", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + 
"torch.minimum", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.msort": { + "ms_api": [ + "ops.msort", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.msort", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.moveaxis": { + "ms_api": [ + "ops.moveaxis", + { + "x": "REQUIRED", + "source": "REQUIRED", + "destination": "REQUIRED" + } + ], + "pt_api": [ + "torch.moveaxis", + { + "input": "REQUIRED", + "source": "REQUIRED", + "destination": "REQUIRED" + } + ], + "ms2pt_mapping": { + "x": "input", + "source": "source", + "destination": "destination" + }, + "gen_explicit_map": null + }, + "torch.movedim": { + "ms_api": [ + "ops.movedim", + { + "x": "REQUIRED", + "source": "REQUIRED", + "destination": "REQUIRED" + } + ], + "pt_api": [ + "torch.movedim", + { + "input": "REQUIRED", + "source": "REQUIRED", + "destination": "REQUIRED" + } + ], + "ms2pt_mapping": { + "x": "input", + "source": "source", + "destination": "destination" + }, + "gen_explicit_map": null + }, + "torch.multinomial": { + "ms_api": [ + "ops.multinomial", + { + "input": "REQUIRED", + "num_samples": "REQUIRED", + "replacement": true, + "seed": null + } + ], + "pt_api": [ + "torch.multinomial", + { + "input": "REQUIRED", + "num_samples": "REQUIRED", + "replacement": false, + "generator": null, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "num_samples": "num_samples", + "replacement": "replacement", + "seed": "generator" + }, + "gen_explicit_map": null + }, + "torch.multiply": { + "ms_api": [ + "ops.multiply", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.multiply", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + 
}, + "torch.mv": { + "ms_api": [ + "ops.mv", + { + "mat": "REQUIRED", + "vec": "REQUIRED" + } + ], + "pt_api": [ + "torch.mv", + { + "input": "REQUIRED", + "vec": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "vec": "vec" + }, + "gen_explicit_map": null + }, + "torch.mvlgamma": { + "ms_api": [ + "ops.mvlgamma", + { + "input": "REQUIRED", + "p": "REQUIRED" + } + ], + "pt_api": [ + "torch.mvlgamma", + { + "input": "REQUIRED", + "p": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "p": "p" + }, + "gen_explicit_map": null + }, + "torch.nan_to_num": { + "ms_api": [ + "ops.nan_to_num", + { + "input": "REQUIRED", + "nan": 0.0, + "posinf": null, + "neginf": null + } + ], + "pt_api": [ + "torch.nan_to_num", + { + "input": "REQUIRED", + "nan": 0.0, + "posinf": null, + "neginf": null, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "nan": "nan", + "posinf": "posinf", + "neginf": "neginf" + }, + "gen_explicit_map": null + }, + "torch.nansum": { + "ms_api": [ + "ops.nansum", + { + "input": "REQUIRED", + "axis": null, + "keepdims": false, + "dtype": null + } + ], + "pt_api": [ + "torch.nansum", + { + "input": "REQUIRED", + "dtype": null + } + ], + "ms2pt_mapping": { + "input": "input", + "axis": "dtype", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "torch.narrow": { + "ms_api": [ + "ops.narrow", + { + "input": "REQUIRED", + "axis": "REQUIRED", + "start": "REQUIRED", + "length": "REQUIRED" + } + ], + "pt_api": [ + "torch.narrow", + { + "input": "REQUIRED", + "dim": "REQUIRED", + "start": "REQUIRED", + "length": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input", + "axis": "dim", + "start": "start", + "length": "length" + }, + "gen_explicit_map": null + }, + "torch.ne": { + "ms_api": [ + "ops.ne", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.ne", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + 
"other": "other" + }, + "gen_explicit_map": null + }, + "torch.neg": { + "ms_api": [ + "ops.neg", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.neg", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.negative": { + "ms_api": [ + "ops.negative", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.negative", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.nextafter": { + "ms_api": [ + "ops.nextafter", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.nextafter", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.nonzero": { + "ms_api": [ + "ops.nonzero", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.nonzero", + { + "input": "REQUIRED", + "out": null, + "as_tuple": false + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.normal": { + "ms_api": [ + "ops.normal", + { + "shape": "REQUIRED", + "mean": "REQUIRED", + "stddev": "REQUIRED", + "seed": null + } + ], + "pt_api": [ + "torch.normal", + { + "mean": "REQUIRED", + "std": "REQUIRED", + "generator": null, + "out": null + } + ], + "ms2pt_mapping": { + "mean": "mean" + }, + "gen_explicit_map": null + }, + "torch.not_equal": { + "ms_api": [ + "ops.not_equal", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.not_equal", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.numel": { + "ms_api": [ + "ops.numel", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.numel", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + 
"gen_explicit_map": null + }, + "torch.ones": { + "ms_api": [ + "ops.ones", + { + "shape": "REQUIRED", + "dtype": null + } + ], + "pt_api": [ + "torch.ones", + { + "*size": "REQUIRED", + "out": null, + "dtype": null, + "layout": "torch.strided", + "device": null, + "requires_grad": false + } + ], + "ms2pt_mapping": { + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "torch.ones_like": { + "ms_api": [ + "ops.ones_like", + { + "input": "REQUIRED", + "dtype": null + } + ], + "pt_api": [ + "torch.ones_like", + { + "input": "REQUIRED", + "dtype": null, + "layout": null, + "device": null, + "requires_grad": false, + "memory_format": "torch.preserve_format" + } + ], + "ms2pt_mapping": { + "input": "input", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "torch.orgqr": { + "ms_api": [ + "ops.orgqr", + { + "input": "REQUIRED", + "input2": "REQUIRED" + } + ], + "pt_api": [ + "torch.orgqr", + { + "input": "REQUIRED", + "tau": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input", + "input2": "tau" + }, + "gen_explicit_map": null + }, + "torch.ormqr": { + "ms_api": [ + "ops.ormqr", + { + "input": "REQUIRED", + "tau": "REQUIRED", + "other": "REQUIRED", + "left": true, + "transpose": false + } + ], + "pt_api": [ + "torch.ormqr", + { + "input": "REQUIRED", + "tau": "REQUIRED", + "other": "REQUIRED", + "left": true, + "transpose": false, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "tau": "tau", + "other": "other", + "left": "left", + "transpose": "transpose" + }, + "gen_explicit_map": null + }, + "torch.outer": { + "ms_api": [ + "ops.outer", + { + "input": "REQUIRED", + "vec2": "REQUIRED" + } + ], + "pt_api": [ + "torch.outer", + { + "input": "REQUIRED", + "vec2": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "vec2": "vec2" + }, + "gen_explicit_map": null + }, + "torch.poisson": { + "ms_api": [ + "ops.random_poisson", + { + "shape": "REQUIRED", + "rate": "REQUIRED", + "seed": null, + "dtype": 
"mstype.float32" + } + ], + "pt_api": [ + "torch.poisson", + { + "input": "REQUIRED", + "generator": null + } + ], + "ms2pt_mapping": { + "rate": "generator" + }, + "gen_explicit_map": null + }, + "torch.polar": { + "ms_api": [ + "ops.polar", + { + "abs": "REQUIRED", + "angle": "REQUIRED" + } + ], + "pt_api": [ + "torch.polar", + { + "abs": "REQUIRED", + "angle": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "abs": "abs", + "angle": "angle" + }, + "gen_explicit_map": null + }, + "torch.polygamma": { + "ms_api": [ + "ops.polygamma", + { + "n": "REQUIRED", + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.polygamma", + { + "n": "REQUIRED", + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "n": "n", + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.pow": { + "ms_api": [ + "ops.pow", + { + "input": "REQUIRED", + "exponent": "REQUIRED" + } + ], + "pt_api": [ + "torch.pow", + { + "input": "REQUIRED", + "exponent": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "exponent": "exponent" + }, + "gen_explicit_map": null + }, + "torch.prod": { + "ms_api": [ + "ops.prod", + { + "input": "REQUIRED", + "axis": null, + "keep_dims": false + } + ], + "pt_api": [ + "torch.prod", + { + "input": "REQUIRED", + "dtype": null + } + ], + "ms2pt_mapping": { + "input": "input", + "axis": "dtype" + }, + "gen_explicit_map": null + }, + "torch.rad2deg": { + "ms_api": [ + "ops.rad2deg", + { + "x": "REQUIRED" + } + ], + "pt_api": [ + "torch.rad2deg", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "x": "input" + }, + "gen_explicit_map": null + }, + "torch.rand": { + "ms_api": [ + "ops.rand", + { + "*size": "REQUIRED", + "dtype": null, + "seed": null + } + ], + "pt_api": [ + "torch.rand", + { + "*size": "REQUIRED", + "generator": null, + "out": null, + "dtype": null, + "layout": "torch.strided", + "device": null, + "requires_grad": false, + "pin_memory": false + } + ], + "ms2pt_mapping": { + 
"*size": "*size", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "torch.rand_like": { + "ms_api": [ + "ops.rand_like", + { + "input": "REQUIRED", + "seed": null, + "dtype": null + } + ], + "pt_api": [ + "torch.rand_like", + { + "input": "REQUIRED", + "dtype": null, + "layout": null, + "device": null, + "requires_grad": false, + "memory_format": "torch.preserve_format" + } + ], + "ms2pt_mapping": { + "input": "input", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "torch.randn": { + "ms_api": [ + "ops.randn", + { + "*size": "REQUIRED", + "dtype": null, + "seed": null + } + ], + "pt_api": [ + "torch.randn", + { + "*size": "REQUIRED", + "generator": null, + "out": null, + "dtype": null, + "layout": "torch.strided", + "device": null, + "requires_grad": false, + "pin_memory": false + } + ], + "ms2pt_mapping": { + "*size": "*size", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "torch.randn_like": { + "ms_api": [ + "ops.randn_like", + { + "input": "REQUIRED", + "seed": null, + "dtype": null + } + ], + "pt_api": [ + "torch.randn_like", + { + "input": "REQUIRED", + "dtype": null, + "layout": null, + "device": null, + "requires_grad": false, + "memory_format": "torch.preserve_format" + } + ], + "ms2pt_mapping": { + "input": "input", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "torch.randint": { + "ms_api": [ + "ops.randint", + { + "low": "REQUIRED", + "high": "REQUIRED", + "size": "REQUIRED", + "seed": null, + "dtype": null + } + ], + "pt_api": [ + "torch.randint", + { + "low": 0.0, + "high": "REQUIRED", + "size": "REQUIRED", + "\\*": "REQUIRED", + "generator": null, + "out": null, + "dtype": null, + "layout": "torch.strided", + "device": null, + "requires_grad": false + } + ], + "ms2pt_mapping": { + "low": "low", + "high": "high", + "size": "size", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "torch.randint_like": { + "ms_api": [ + "ops.randint_like", + { + "input": "REQUIRED", + "low": "REQUIRED", + "high": 
"REQUIRED", + "dtype": null, + "seed": null + } + ], + "pt_api": [ + "torch.randint_like", + { + "input": "REQUIRED", + "low": 0.0, + "high": "REQUIRED", + "\\*": "REQUIRED", + "dtype": null, + "layout": "torch.strided", + "device": null, + "requires_grad": false, + "memory_format": "torch.preserve_format" + } + ], + "ms2pt_mapping": { + "input": "input", + "low": "low", + "high": "high", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "torch.randperm": { + "ms_api": [ + "ops.randperm", + { + "n": "REQUIRED", + "seed": 0.0, + "offset": 0.0, + "dtype": "mstype.int64" + } + ], + "pt_api": [ + "torch.randperm", + { + "n": "REQUIRED", + "generator": null, + "out": null, + "dtype": "torch.int64", + "layout": "torch.strided", + "device": null, + "requires_grad": false, + "pin_memory": false + } + ], + "ms2pt_mapping": { + "n": "n", + "seed": "generator", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "torch.range": { + "ms_api": [ + "ops.range", + { + "start": "REQUIRED", + "end": "REQUIRED", + "step": "REQUIRED" + } + ], + "pt_api": [ + "torch.range", + { + "start": 0.0, + "end": "REQUIRED", + "step": 1.0, + "out": null, + "dtype": null, + "layout": "torch.strided", + "device": null, + "requires_grad": false + } + ], + "ms2pt_mapping": { + "start": "start", + "end": "end", + "step": "step" + }, + "gen_explicit_map": null + }, + "torch.ravel": { + "ms_api": [ + "ops.ravel", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.ravel", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.real": { + "ms_api": [ + "ops.real", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.real", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.reciprocal": { + "ms_api": [ + "ops.reciprocal", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.reciprocal", + { + "input": "REQUIRED", + "out": null + } + ], + 
"ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.remainder": { + "ms_api": [ + "ops.remainder", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.remainder", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.renorm": { + "ms_api": [ + "ops.renorm", + { + "input": "REQUIRED", + "p": "REQUIRED", + "axis": "REQUIRED", + "maxnorm": "REQUIRED" + } + ], + "pt_api": [ + "torch.renorm", + { + "input": "REQUIRED", + "p": "REQUIRED", + "dim": "REQUIRED", + "maxnorm": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "p": "p", + "axis": "dim", + "maxnorm": "maxnorm" + }, + "gen_explicit_map": null + }, + "torch.repeat_interleave": { + "ms_api": [ + "ops.repeat_interleave", + { + "input": "REQUIRED", + "repeats": "REQUIRED", + "axis": null + } + ], + "pt_api": [ + "torch.repeat_interleave", + { + "input": "REQUIRED", + "repeats": "REQUIRED", + "dim": null, + "output_size": null + } + ], + "ms2pt_mapping": { + "input": "input", + "repeats": "repeats", + "axis": "dim" + }, + "gen_explicit_map": null + }, + "torch.reshape": { + "ms_api": [ + "ops.reshape", + { + "input": "REQUIRED", + "shape": "REQUIRED" + } + ], + "pt_api": [ + "torch.reshape", + { + "input": "REQUIRED", + "shape": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input", + "shape": "shape" + }, + "gen_explicit_map": null + }, + "torch.roll": { + "ms_api": [ + "ops.roll", + { + "input": "REQUIRED", + "shifts": "REQUIRED", + "dims": null + } + ], + "pt_api": [ + "torch.roll", + { + "input": "REQUIRED", + "shifts": "REQUIRED", + "dims": null + } + ], + "ms2pt_mapping": { + "input": "input", + "shifts": "shifts", + "dims": "dims" + }, + "gen_explicit_map": null + }, + "torch.rot90": { + "ms_api": [ + "ops.rot90", + { + "input": "REQUIRED", + "k": "REQUIRED", + "dims": "REQUIRED" + } + ], + 
"pt_api": [ + "torch.rot90", + { + "input": "REQUIRED", + "k": 1.0, + "dims": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input", + "k": "k", + "dims": "dims" + }, + "gen_explicit_map": null + }, + "torch.round": { + "ms_api": [ + "ops.round", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.round", + { + "input": "REQUIRED", + "decimals": 0.0, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.row_stack": { + "ms_api": [ + "ops.row_stack", + { + "tensors": "REQUIRED" + } + ], + "pt_api": [ + "torch.row_stack", + { + "tensors": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "tensors": "tensors" + }, + "gen_explicit_map": null + }, + "torch.rsqrt": { + "ms_api": [ + "ops.rsqrt", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.rsqrt", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.searchsorted": { + "ms_api": [ + "ops.searchsorted", + { + "sorted_sequence": "REQUIRED", + "values": "REQUIRED", + "out_int32": false, + "right": false + } + ], + "pt_api": [ + "torch.searchsorted", + { + "sorted_sequence": "REQUIRED", + "values": "REQUIRED", + "out_int32": false, + "right": false, + "side": "left", + "out": null, + "sorter": null + } + ], + "ms2pt_mapping": { + "sorted_sequence": "sorted_sequence", + "values": "values", + "out_int32": "out_int32", + "right": "right" + }, + "gen_explicit_map": null + }, + "torch.scatter": { + "ms_api": [ + "ops.scatter", + { + "input": "REQUIRED", + "axis": "REQUIRED", + "index": "REQUIRED", + "src": "REQUIRED" + } + ], + "pt_api": [ + "torch.scatter", + { + "input": "REQUIRED", + "dim": "REQUIRED", + "index": "REQUIRED", + "src": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input", + "axis": "dim", + "index": "index", + "src": "src" + }, + "gen_explicit_map": null + }, + "torch.scatter_add": { + "ms_api": [ + "ops.tensor_scatter_elements", + { + 
"input_x": "REQUIRED", + "indices": "REQUIRED", + "updates": "REQUIRED", + "axis": 0.0, + "reduction": "none" + } + ], + "pt_api": [ + "torch.scatter_add", + { + "input": "REQUIRED", + "dim": "REQUIRED", + "index": "REQUIRED", + "src": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input_x": "input", + "indices": "dim", + "axis": "src" + }, + "gen_explicit_map": null + }, + "torch.sgn": { + "ms_api": [ + "ops.sgn", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.sgn", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.sigmoid": { + "ms_api": [ + "ops.sigmoid", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.sigmoid", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.sign": { + "ms_api": [ + "ops.sign", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.sign", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.signbit": { + "ms_api": [ + "ops.signbit", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.signbit", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.sin": { + "ms_api": [ + "ops.sin", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.sin", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.sinc": { + "ms_api": [ + "ops.sinc", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.sinc", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.sinh": { + "ms_api": [ + "ops.sinh", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.sinh", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + 
"input": "input" + }, + "gen_explicit_map": null + }, + "torch.slogdet": { + "ms_api": [ + "ops.slogdet", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.slogdet", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.sort": { + "ms_api": [ + "ops.sort", + { + "input_x": "REQUIRED", + "axis": -1, + "descending": false + } + ], + "pt_api": [ + "torch.sort", + { + "input": "REQUIRED", + "dim": -1.0, + "descending": false, + "stable": false, + "out": null + } + ], + "ms2pt_mapping": { + "input_x": "input", + "axis": "dim", + "descending": "descending" + }, + "gen_explicit_map": null + }, + "torch.split": { + "ms_api": [ + "ops.split", + { + "tensor": "REQUIRED", + "split_size_or_sections": "REQUIRED", + "axis": 0.0 + } + ], + "pt_api": [ + "torch.split", + { + "tensor": "REQUIRED", + "split_size_or_sections": "REQUIRED", + "dim": 0.0 + } + ], + "ms2pt_mapping": { + "tensor": "tensor", + "split_size_or_sections": "split_size_or_sections", + "axis": "dim" + }, + "gen_explicit_map": null + }, + "torch.stack": { + "ms_api": [ + "ops.stack", + { + "tensors": "REQUIRED", + "axis": 0.0 + } + ], + "pt_api": [ + "torch.stack", + { + "tensors": "REQUIRED", + "dim": 0.0, + "out": null + } + ], + "ms2pt_mapping": { + "tensors": "tensors", + "axis": "dim" + }, + "gen_explicit_map": null + }, + "torch.squeeze": { + "ms_api": [ + "ops.squeeze", + { + "input": "REQUIRED", + "axis": null + } + ], + "pt_api": [ + "torch.squeeze", + { + "input": "REQUIRED", + "dim": null + } + ], + "ms2pt_mapping": { + "input": "input", + "axis": "dim" + }, + "gen_explicit_map": null + }, + "torch.std": { + "ms_api": [ + "ops.std", + { + "input": "REQUIRED", + "axis": null, + "ddof": 0.0, + "keepdims": false + } + ], + "pt_api": [ + "torch.std", + { + "input": "REQUIRED", + "dim": null, + "correction": 1.0, + "keepdim": false, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "axis": "dim", + "keepdims": 
"keepdim" + }, + "gen_explicit_map": null + }, + "torch.std_mean": { + "ms_api": [ + "ops.std_mean", + { + "input": "REQUIRED", + "axis": null, + "ddof": 0.0, + "keepdims": false + } + ], + "pt_api": [ + "torch.std_mean", + { + "input": "REQUIRED", + "dim": null, + "correction": 1.0, + "keepdim": false, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "axis": "dim", + "keepdims": "keepdim" + }, + "gen_explicit_map": null + }, + "torch.sqrt": { + "ms_api": [ + "ops.sqrt", + { + "x": "REQUIRED" + } + ], + "pt_api": [ + "torch.sqrt", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "x": "input" + }, + "gen_explicit_map": null + }, + "torch.square": { + "ms_api": [ + "ops.square", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.square", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.sub": { + "ms_api": [ + "ops.subtract", + { + "input": "REQUIRED", + "other": "REQUIRED", + "alpha": 1.0 + } + ], + "pt_api": [ + "torch.sub", + { + "input": "REQUIRED", + "other": "REQUIRED", + "alpha": 1.0, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other", + "alpha": "alpha" + }, + "gen_explicit_map": null + }, + "torch.subtract": { + "ms_api": [ + "ops.subtract", + { + "input": "REQUIRED", + "other": "REQUIRED", + "alpha": 1.0 + } + ], + "pt_api": [ + "torch.subtract", + { + "input": "REQUIRED", + "other": "REQUIRED", + "alpha": 1.0, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other", + "alpha": "alpha" + }, + "gen_explicit_map": null + }, + "torch.sum": { + "ms_api": [ + "ops.sum", + { + "input": "REQUIRED", + "dim": null, + "keepdim": false, + "dtype": null + } + ], + "pt_api": [ + "torch.sum", + { + "input": "REQUIRED", + "dtype": null + } + ], + "ms2pt_mapping": { + "input": "input", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "torch.svd": { + "ms_api": [ + "ops.svd", 
+ { + "input": "REQUIRED", + "full_matrices": false, + "compute_uv": true + } + ], + "pt_api": [ + "torch.svd", + { + "input": "REQUIRED", + "some": true, + "compute_uv": true, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "compute_uv": "compute_uv" + }, + "gen_explicit_map": null + }, + "torch.swapaxes": { + "ms_api": [ + "ops.swapaxes", + { + "input": "REQUIRED", + "axis0": "REQUIRED", + "axis1": "REQUIRED" + } + ], + "pt_api": [ + "torch.swapaxes", + { + "input": "REQUIRED", + "axis0": "REQUIRED", + "axis1": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input", + "axis0": "axis0", + "axis1": "axis1" + }, + "gen_explicit_map": null + }, + "torch.swapdims": { + "ms_api": [ + "ops.swapdims", + { + "input": "REQUIRED", + "dim0": "REQUIRED", + "dim1": "REQUIRED" + } + ], + "pt_api": [ + "torch.swapdims", + { + "input": "REQUIRED", + "dim0": "REQUIRED", + "dim1": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input", + "dim0": "dim0", + "dim1": "dim1" + }, + "gen_explicit_map": null + }, + "torch.t": { + "ms_api": [ + "ops.t", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.t", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.tan": { + "ms_api": [ + "ops.tan", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.tan", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.tanh": { + "ms_api": [ + "ops.tanh", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.tanh", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.tensor_split": { + "ms_api": [ + "ops.tensor_split", + { + "input": "REQUIRED", + "indices_or_sections": "REQUIRED", + "axis": 0.0 + } + ], + "pt_api": [ + "torch.tensor_split", + { + "input": "REQUIRED", + "indices_or_sections": "REQUIRED", + "dim": 0.0 + } + ], + 
"ms2pt_mapping": { + "input": "input", + "indices_or_sections": "indices_or_sections", + "axis": "dim" + }, + "gen_explicit_map": null + }, + "torch.tile": { + "ms_api": [ + "ops.tile", + { + "input": "REQUIRED", + "multiples": "REQUIRED" + } + ], + "pt_api": [ + "torch.tile", + { + "input": "REQUIRED", + "dims": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input", + "multiples": "dims" + }, + "gen_explicit_map": null + }, + "torch.topk": { + "ms_api": [ + "ops.topk", + { + "input": "REQUIRED", + "k": "REQUIRED", + "dim": null, + "largest": true, + "sorted": true + } + ], + "pt_api": [ + "torch.topk", + { + "input": "REQUIRED", + "k": "REQUIRED", + "dim": null, + "largest": true, + "sorted": true, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "k": "k", + "dim": "dim", + "largest": "largest", + "sorted": "sorted" + }, + "gen_explicit_map": null + }, + "torch.trace": { + "ms_api": [ + "ops.trace", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.trace", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.transpose": { + "ms_api": [ + "ops.swapaxes", + { + "input": "REQUIRED", + "axis0": "REQUIRED", + "axis1": "REQUIRED" + } + ], + "pt_api": [ + "torch.transpose", + { + "input": "REQUIRED", + "dim0": "REQUIRED", + "dim1": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input", + "axis0": "dim0", + "axis1": "dim1" + }, + "gen_explicit_map": null + }, + "torch.trapz": { + "ms_api": [ + "ops.trapz", + { + "y": "REQUIRED", + "x": null, + "dx": 1.0, + "dim": -1 + } + ], + "pt_api": [ + "torch.trapz", + { + "y": "REQUIRED", + "x": "REQUIRED", + "dim": -1.0 + } + ], + "ms2pt_mapping": { + "y": "y", + "x": "x", + "dx": "dim", + "dim": "dim" + }, + "gen_explicit_map": null + }, + "torch.tril": { + "ms_api": [ + "ops.tril", + { + "input": "REQUIRED", + "diagonal": 0.0 + } + ], + "pt_api": [ + "torch.tril", + { + "input": "REQUIRED", + "diagonal": 0.0, + "out": null + } 
+ ], + "ms2pt_mapping": { + "input": "input", + "diagonal": "diagonal" + }, + "gen_explicit_map": null + }, + "torch.tril_indices": { + "ms_api": [ + "ops.tril_indices", + { + "row": "REQUIRED", + "col": "REQUIRED", + "offset": 0.0, + "dtype": "mstype.int64" + } + ], + "pt_api": [ + "torch.tril_indices", + { + "row": "REQUIRED", + "col": "REQUIRED", + "offset": 0.0, + "dtype": "torch.long", + "device": "cpu", + "layout": "torch.strided" + } + ], + "ms2pt_mapping": { + "row": "row", + "col": "col", + "offset": "offset", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "torch.triu_indices": { + "ms_api": [ + "ops.triu_indices", + { + "row": "REQUIRED", + "col": "REQUIRED", + "offset": 0.0, + "dtype": "mstype.int64" + } + ], + "pt_api": [ + "torch.triu_indices", + { + "row": "REQUIRED", + "col": "REQUIRED", + "offset": 0.0, + "dtype": "torch.long", + "device": "cpu", + "layout": "torch.strided" + } + ], + "ms2pt_mapping": { + "row": "row", + "col": "col", + "offset": "offset", + "dtype": "dtype" + }, + "gen_explicit_map": null + }, + "torch.true_divide": { + "ms_api": [ + "ops.true_divide", + { + "dividend": "REQUIRED", + "divisor": "REQUIRED" + } + ], + "pt_api": [ + "torch.true_divide", + { + "dividend": "REQUIRED", + "divisor": "REQUIRED", + "out": "REQUIRED" + } + ], + "ms2pt_mapping": { + "dividend": "dividend", + "divisor": "divisor" + }, + "gen_explicit_map": null + }, + "torch.trunc": { + "ms_api": [ + "ops.trunc", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.trunc", + { + "input": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.unbind": { + "ms_api": [ + "ops.unbind", + { + "input": "REQUIRED", + "dim": 0.0 + } + ], + "pt_api": [ + "torch.unbind", + { + "input": "REQUIRED", + "dim": 0.0 + } + ], + "ms2pt_mapping": { + "input": "input", + "dim": "dim" + }, + "gen_explicit_map": null + }, + "torch.unique": { + "ms_api": [ + "ops.unique", + { + "input": "REQUIRED" + } 
+ ], + "pt_api": [ + "torch.unique", + { + "input": "REQUIRED", + "sorted": true, + "return_inverse": false, + "return_counts": false, + "dim": null + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.unique_consecutive": { + "ms_api": [ + "ops.unique_consecutive", + { + "input": "REQUIRED", + "return_idx": false, + "return_counts": false, + "axis": null + } + ], + "pt_api": [ + "torch.unique_consecutive", + { + "*args": "REQUIRED", + "**kwargs": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "*args" + }, + "gen_explicit_map": null + }, + "torch.unsqueeze": { + "ms_api": [ + "ops.unsqueeze", + { + "input": "REQUIRED", + "dim": "REQUIRED" + } + ], + "pt_api": [ + "torch.unsqueeze", + { + "input": "REQUIRED", + "dim": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input", + "dim": "dim" + }, + "gen_explicit_map": null + }, + "torch.vander": { + "ms_api": [ + "ops.vander", + { + "x": "REQUIRED", + "N": null + } + ], + "pt_api": [ + "torch.vander", + { + "x": "REQUIRED", + "N": null, + "increasing": false + } + ], + "ms2pt_mapping": { + "x": "x", + "N": "N" + }, + "gen_explicit_map": null + }, + "torch.var": { + "ms_api": [ + "ops.var", + { + "input": "REQUIRED", + "axis": null, + "ddof": 0.0, + "keepdims": false + } + ], + "pt_api": [ + "torch.var", + { + "input": "REQUIRED", + "dim": null, + "correction": 1.0, + "keepdim": false, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "axis": "dim", + "keepdims": "keepdim" + }, + "gen_explicit_map": null + }, + "torch.var_mean": { + "ms_api": [ + "ops.var_mean", + { + "input": "REQUIRED", + "axis": null, + "ddof": 0.0, + "keepdims": false + } + ], + "pt_api": [ + "torch.var_mean", + { + "input": "REQUIRED", + "dim": null, + "correction": 1.0, + "keepdim": false, + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "axis": "dim", + "keepdims": "keepdim" + }, + "gen_explicit_map": null + }, + "torch.view_as_real": { + "ms_api": [ + 
"ops.view_as_real", + { + "input": "REQUIRED" + } + ], + "pt_api": [ + "torch.view_as_real", + { + "input": "REQUIRED" + } + ], + "ms2pt_mapping": { + "input": "input" + }, + "gen_explicit_map": null + }, + "torch.vstack": { + "ms_api": [ + "ops.vstack", + { + "inputs": "REQUIRED" + } + ], + "pt_api": [ + "torch.vstack", + { + "tensors": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "inputs": "tensors" + }, + "gen_explicit_map": null + }, + "torch.where": { + "ms_api": [ + "ops.where", + { + "condition": "REQUIRED", + "x": "REQUIRED", + "y": "REQUIRED" + } + ], + "pt_api": [ + "torch.where", + { + "condition": "REQUIRED", + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "condition": "condition", + "x": "input" + }, + "gen_explicit_map": null + }, + "torch.xlogy": { + "ms_api": [ + "ops.xlogy", + { + "input": "REQUIRED", + "other": "REQUIRED" + } + ], + "pt_api": [ + "torch.xlogy", + { + "input": "REQUIRED", + "other": "REQUIRED", + "out": null + } + ], + "ms2pt_mapping": { + "input": "input", + "other": "other" + }, + "gen_explicit_map": null + }, + "torch.zeros": { + "ms_api": [ + "ops.zeros", + { + "size": "REQUIRED", + "dtype": null + } + ], + "pt_api": [ + "torch.zeros", + { + "*size": "REQUIRED", + "out": null, + "dtype": null, + "layout": "torch.strided", + "device": null, + "requires_grad": false + } + ], + "ms2pt_mapping": { + }, + "gen_explicit_map": null + }, + "torch.zeros_like": { + "ms_api": [ + "ops.zeros_like", + { + "input": "REQUIRED", + "dtype": null + } + ], + "pt_api": [ + "torch.zeros_like", + { + "input": "REQUIRED", + "dtype": null, + "layout": null, + "device": null, + "requires_grad": false, + "memory_format": "torch.preserve_format" + } + ], + "ms2pt_mapping": { + "input": "input", + "dtype": "dtype" + }, + "gen_explicit_map": null + } +} \ No newline at end of file diff --git a/mindconverter/mindconverter/mappings/torch_optim_mappings.json 
b/mindconverter/mindconverter/mappings/torch_optim_mappings.json new file mode 100644 index 0000000000000000000000000000000000000000..5305c4e68fda8f5e4f5a2eb4482a51f51da714e0 --- /dev/null +++ b/mindconverter/mindconverter/mappings/torch_optim_mappings.json @@ -0,0 +1,765 @@ +{ + "optim.Adadelta": { + "ms_api": [ + "nn.Adadelta", + { + "params": "REQUIRED", + "learning_rate": 1.0, + "rho": 0.9, + "epsilon": 1e-06, + "loss_scale": 1.0, + "weight_decay": 0.0 + } + ], + "pt_api": [ + "optim.Adadelta", + { + "params": "REQUIRED", + "lr": 1.0, + "rho": 0.9, + "eps": 1e-06, + "weight_decay": 0.0, + "foreach": null, + "maximize": false, + "differentiable": false + } + ], + "ms2pt_mapping": { + "params": "params", + "learning_rate": "lr", + "rho": "rho", + "epsilon": "eps", + "weight_decay": "weight_decay" + }, + "gen_explicit_map": null + }, + "optim.Adagrad": { + "ms_api": [ + "nn.Adagrad", + { + "params": "REQUIRED", + "accum": 0.1, + "learning_rate": 0.001, + "update_slots": true, + "loss_scale": 1.0, + "weight_decay": 0.0 + } + ], + "pt_api": [ + "optim.Adagrad", + { + "params": "REQUIRED", + "lr": 0.01, + "lr_decay": 0.0, + "weight_decay": 0.0, + "initial_accumulator_value": 0.0, + "eps": 1e-10, + "foreach": null, + "maximize": false, + "differentiable": false + } + ], + "ms2pt_mapping": { + "params": "params", + "learning_rate": "lr", + "loss_scale": "initial_accumulator_value", + "weight_decay": "weight_decay" + }, + "gen_explicit_map": null + }, + "optim.Adam": { + "ms_api": [ + "optim.Adam", + { + "params": "REQUIRED", + "lr": 0.001, + "betas": 0.9, + "eps": 1e-08, + "weight_decay": 0.0, + "amsgrad": false, + "maximize": false + } + ], + "pt_api": [ + "optim.Adam", + { + "params": "REQUIRED", + "lr": 0.001, + "betas": 0.9, + "eps": 1e-08, + "weight_decay": 0.0, + "amsgrad": false, + "foreach": null, + "maximize": false, + "capturable": false, + "differentiable": false, + "fused": null + } + ], + "ms2pt_mapping": { + "params": "params", + "lr": "lr", + "betas": 
"betas", + "eps": "eps", + "weight_decay": "weight_decay", + "amsgrad": "amsgrad", + "maximize": "maximize" + }, + "gen_explicit_map": null + }, + "optim.AdaMax": { + "ms_api": [ + "nn.AdaMax", + { + "params": "REQUIRED", + "learning_rate": 0.001, + "beta1": 0.9, + "beta2": 0.999, + "eps": 1e-08, + "weight_decay": 0.0, + "loss_scale": 1.0 + } + ], + "pt_api": [ + "optim.AdaMax", + {} + ], + "ms2pt_mapping": { + "learning_rate": "lr" + }, + "gen_explicit_map": null + }, + "optim.AdamW": { + "ms_api": [ + "optim.AdamW", + { + "params": "REQUIRED", + "lr": 0.001, + "betas": 0.9, + "eps": 1e-08, + "weight_decay": 0.01, + "amsgrad": false, + "maximize": false + } + ], + "pt_api": [ + "optim.AdamW", + { + "params": "REQUIRED", + "lr": 0.001, + "betas": 0.9, + "eps": 1e-08, + "weight_decay": 0.01, + "amsgrad": false, + "maximize": false, + "foreach": null, + "capturable": false, + "differentiable": false, + "fused": null + } + ], + "ms2pt_mapping": { + "params": "params", + "lr": "lr", + "betas": "betas", + "eps": "eps", + "weight_decay": "weight_decay", + "amsgrad": "amsgrad", + "maximize": "maximize" + }, + "gen_explicit_map": null + }, + "optim.ASGD": { + "ms_api": [ + "nn.ASGD", + { + "params": "REQUIRED", + "learning_rate": 0.1, + "lambd": 0.0001, + "alpha": 0.75, + "t0": 1000000.0, + "weight_decay": 0.0 + } + ], + "pt_api": [ + "optim.ASGD", + { + "params": "REQUIRED", + "lr": 0.01, + "lambd": 0.0001, + "alpha": 0.75, + "t0": 1000000.0, + "weight_decay": 0.0, + "foreach": null, + "maximize": false, + "differentiable": false + } + ], + "ms2pt_mapping": { + "params": "params", + "learning_rate": "lr", + "lambd": "lambd", + "alpha": "alpha", + "t0": "t0", + "weight_decay": "weight_decay" + }, + "gen_explicit_map": null + }, + "optim.Optimizer": { + "ms_api": [ + "nn.Optimizer", + { + "learning_rate": "REQUIRED", + "parameters": "REQUIRED", + "weight_decay": 0.0, + "loss_scale": 1.0 + } + ], + "pt_api": [ + "optim.Optimizer", + {} + ], + "ms2pt_mapping": { + 
"learning_rate": "lr" + }, + "gen_explicit_map": null + }, + "optim.SparseAdam": { + "ms_api": [ + "nn.LazyAdam", + { + "params": "REQUIRED", + "learning_rate": 0.001, + "beta1": 0.9, + "beta2": 0.999, + "eps": 1e-08, + "use_locking": false, + "use_nesterov": false, + "weight_decay": 0.0, + "loss_scale": 1.0 + } + ], + "pt_api": [ + "optim.SparseAdam", + { + "params": "REQUIRED", + "lr": 0.001, + "betas": 0.9, + "eps": 1e-08, + "maximize": false + } + ], + "ms2pt_mapping": { + "params": "params", + "learning_rate": "lr", + "beta1": "betas", + "eps": "eps" + }, + "gen_explicit_map": null + }, + "optim.RMSProp": { + "ms_api": [ + "nn.RMSProp", + { + "params": "REQUIRED", + "learning_rate": 0.1, + "decay": 0.9, + "momentum": 0.0, + "epsilon": 1e-10, + "use_locking": false, + "centered": false, + "loss_scale": 1.0, + "weight_decay": 0.0 + } + ], + "pt_api": [ + "optim.RMSProp", + {} + ], + "ms2pt_mapping": { + "learning_rate": "lr" + }, + "gen_explicit_map": null + }, + "optim.Rprop": { + "ms_api": [ + "nn.Rprop", + { + "params": "REQUIRED", + "learning_rate": 0.1, + "etas": 0.5, + "step_sizes": 1e-06, + "weight_decay": 0.0 + } + ], + "pt_api": [ + "optim.Rprop", + { + "params": "REQUIRED", + "lr": 0.01, + "etas": 0.5, + "step_sizes": 1e-06, + "foreach": null, + "maximize": false, + "differentiable": false + } + ], + "ms2pt_mapping": { + "params": "params", + "learning_rate": "lr", + "etas": "etas", + "step_sizes": "step_sizes" + }, + "gen_explicit_map": null + }, + "optim.SGD": { + "ms_api": [ + "optim.SGD", + { + "params": "REQUIRED", + "lr": "REQUIRED", + "momentum": 0.0, + "dampening": 0.0, + "weight_decay": 0.0, + "nesterov": false, + "maximize": false + } + ], + "pt_api": [ + "optim.SGD", + { + "params": "REQUIRED", + "lr": "REQUIRED", + "momentum": 0.0, + "dampening": 0.0, + "weight_decay": 0.0, + "nesterov": false, + "maximize": false, + "foreach": null, + "differentiable": false + } + ], + "ms2pt_mapping": { + "params": "params", + "lr": "lr", + "momentum": 
"momentum", + "dampening": "dampening", + "weight_decay": "weight_decay", + "nesterov": "nesterov", + "maximize": "maximize" + }, + "gen_explicit_map": null + }, + "optim.lr_scheduler.CosineAnnealingLR": { + "ms_api": [ + "optim.lr_scheduler.CosineAnnealingLR", + { + "optimizer": "REQUIRED", + "T_max": "REQUIRED", + "eta_min": 0.0, + "last_epoch": "- 1" + } + ], + "pt_api": [ + "optim.lr_scheduler.CosineAnnealingLR", + { + "optimizer": "REQUIRED", + "T_max": "REQUIRED", + "eta_min": 0.0, + "last_epoch": -1.0, + "verbose": false + } + ], + "ms2pt_mapping": { + "optimizer": "optimizer", + "T_max": "T_max", + "eta_min": "eta_min", + "last_epoch": "last_epoch" + }, + "gen_explicit_map": null + }, + "optim.lr_scheduler.ExponentialLR": { + "ms_api": [ + "optim.lr_scheduler.ExponentialLR", + { + "optimizer": "REQUIRED", + "gamma": "REQUIRED", + "last_epoch": "- 1" + } + ], + "pt_api": [ + "optim.lr_scheduler.ExponentialLR", + { + "optimizer": "REQUIRED", + "gamma": "REQUIRED", + "last_epoch": -1.0, + "verbose": false + } + ], + "ms2pt_mapping": { + "optimizer": "optimizer", + "gamma": "gamma", + "last_epoch": "last_epoch" + }, + "gen_explicit_map": null + }, + "optim.lr_scheduler.MultiStepLR": { + "ms_api": [ + "optim.lr_scheduler.MultiStepLR", + { + "optimizer": "REQUIRED", + "milestones": "REQUIRED", + "gamma": 0.1, + "last_epoch": "- 1" + } + ], + "pt_api": [ + "optim.lr_scheduler.MultiStepLR", + { + "optimizer": "REQUIRED", + "milestones": "REQUIRED", + "gamma": 0.1, + "last_epoch": -1.0, + "verbose": false + } + ], + "ms2pt_mapping": { + "optimizer": "optimizer", + "milestones": "milestones", + "gamma": "gamma", + "last_epoch": "last_epoch" + }, + "gen_explicit_map": null + }, + "optim.lr_scheduler.StepLR": { + "ms_api": [ + "optim.lr_scheduler.StepLR", + { + "optimizer": "REQUIRED", + "step_size": "REQUIRED", + "gamma": 0.1, + "last_epoch": "- 1" + } + ], + "pt_api": [ + "optim.lr_scheduler.StepLR", + { + "optimizer": "REQUIRED", + "step_size": "REQUIRED", + 
"gamma": 0.1, + "last_epoch": -1.0, + "verbose": false + } + ], + "ms2pt_mapping": { + "optimizer": "optimizer", + "step_size": "step_size", + "gamma": "gamma", + "last_epoch": "last_epoch" + }, + "gen_explicit_map": null + }, + "optim.lr_scheduler.LRScheduler": { + "ms_api": [ + "optim.lr_scheduler.LRScheduler", + { + "optimizer": "REQUIRED", + "last_epoch": "- 1" + } + ], + "pt_api": [ + "optim.lr_scheduler.LRScheduler", + {} + ], + "ms2pt_mapping": {}, + "gen_explicit_map": null + }, + "optim.lr_scheduler.ConstantLR": { + "ms_api": [ + "optim.lr_scheduler.ConstantLR", + { + "optimizer": "REQUIRED", + "factor": "1.0 / 3", + "total_iters": 5.0, + "last_epoch": -1 + } + ], + "pt_api": [ + "optim.lr_scheduler.ConstantLR", + { + "optimizer": "REQUIRED", + "factor": 0.3333333333333333, + "total_iters": 5.0, + "last_epoch": -1.0, + "verbose": false + } + ], + "ms2pt_mapping": { + "optimizer": "optimizer", + "factor": "factor", + "total_iters": "total_iters", + "last_epoch": "last_epoch" + }, + "gen_explicit_map": null + }, + "optim.lr_scheduler.CosineAnnealingWarmRestarts": { + "ms_api": [ + "optim.lr_scheduler.CosineAnnealingWarmRestarts", + { + "optimizer": "REQUIRED", + "T_0": "REQUIRED", + "T_mult": 1.0, + "eta_min": 0.0, + "last_epoch": -1 + } + ], + "pt_api": [ + "optim.lr_scheduler.CosineAnnealingWarmRestarts", + { + "optimizer": "REQUIRED", + "T_0": "REQUIRED", + "T_mult": 1.0, + "eta_min": 0.0, + "last_epoch": -1.0, + "verbose": false + } + ], + "ms2pt_mapping": { + "optimizer": "optimizer", + "T_0": "T_0", + "T_mult": "T_mult", + "eta_min": "eta_min", + "last_epoch": "last_epoch" + }, + "gen_explicit_map": null + }, + "optim.lr_scheduler.CyclicLR": { + "ms_api": [ + "optim.lr_scheduler.CyclicLR", + { + "optimizer": "REQUIRED", + "base_lr": "REQUIRED", + "max_lr": "REQUIRED", + "step_size_up": 2000.0, + "step_size_down": null, + "mode": "triangular", + "gamma": 1.0, + "scale_fn": null, + "scale_mode": "cycle", + "last_epoch": -1 + } + ], + "pt_api": [ + 
"optim.lr_scheduler.CyclicLR", + { + "optimizer": "REQUIRED", + "base_lr": "REQUIRED", + "max_lr": "REQUIRED", + "step_size_up": 2000.0, + "step_size_down": null, + "mode": "triangular", + "gamma": 1.0, + "scale_fn": null, + "scale_mode": "cycle", + "cycle_momentum": true, + "base_momentum": 0.8, + "max_momentum": 0.9, + "last_epoch": -1.0, + "verbose": false + } + ], + "ms2pt_mapping": { + "optimizer": "optimizer", + "base_lr": "base_lr", + "max_lr": "max_lr", + "step_size_up": "step_size_up", + "step_size_down": "step_size_down", + "mode": "mode", + "gamma": "gamma", + "scale_fn": "scale_fn", + "scale_mode": "scale_mode", + "last_epoch": "last_epoch" + }, + "gen_explicit_map": null + }, + "optim.lr_scheduler.LambdaLR": { + "ms_api": [ + "optim.lr_scheduler.LambdaLR", + { + "optimizer": "REQUIRED", + "lr_lambda": "REQUIRED", + "last_epoch": "- 1" + } + ], + "pt_api": [ + "optim.lr_scheduler.LambdaLR", + { + "optimizer": "REQUIRED", + "lr_lambda": "REQUIRED", + "last_epoch": -1.0, + "verbose": false + } + ], + "ms2pt_mapping": { + "optimizer": "optimizer", + "lr_lambda": "lr_lambda", + "last_epoch": "last_epoch" + }, + "gen_explicit_map": null + }, + "optim.lr_scheduler.LinearLR": { + "ms_api": [ + "optim.lr_scheduler.LinearLR", + { + "optimizer": "REQUIRED", + "start_factor": "1.0 / 3", + "end_factor": 1.0, + "total_iters": 5.0, + "last_epoch": -1 + } + ], + "pt_api": [ + "optim.lr_scheduler.LinearLR", + { + "optimizer": "REQUIRED", + "start_factor": 0.3333333333333333, + "end_factor": 1.0, + "total_iters": 5.0, + "last_epoch": -1.0, + "verbose": false + } + ], + "ms2pt_mapping": { + "optimizer": "optimizer", + "start_factor": "start_factor", + "end_factor": "end_factor", + "total_iters": "total_iters", + "last_epoch": "last_epoch" + }, + "gen_explicit_map": null + }, + "optim.lr_scheduler.MultiplicativeLR": { + "ms_api": [ + "optim.lr_scheduler.MultiplicativeLR", + { + "optimizer": "REQUIRED", + "lr_lambda": "REQUIRED", + "last_epoch": -1 + } + ], + "pt_api": [ + 
"optim.lr_scheduler.MultiplicativeLR", + { + "optimizer": "REQUIRED", + "lr_lambda": "REQUIRED", + "last_epoch": -1.0, + "verbose": false + } + ], + "ms2pt_mapping": { + "optimizer": "optimizer", + "lr_lambda": "lr_lambda", + "last_epoch": "last_epoch" + }, + "gen_explicit_map": null + }, + "optim.lr_scheduler.PolynomialLR": { + "ms_api": [ + "optim.lr_scheduler.PolynomialLR", + { + "optimizer": "REQUIRED", + "total_iters": 5.0, + "power": 1.0, + "last_epoch": -1 + } + ], + "pt_api": [ + "optim.lr_scheduler.PolynomialLR", + { + "optimizer": "REQUIRED", + "total_iters": 5.0, + "power": 1.0, + "last_epoch": -1.0, + "verbose": false + } + ], + "ms2pt_mapping": { + "optimizer": "optimizer", + "total_iters": "total_iters", + "power": "power", + "last_epoch": "last_epoch" + }, + "gen_explicit_map": null + }, + "optim.lr_scheduler.ReduceLROnPlateau": { + "ms_api": [ + "optim.lr_scheduler.ReduceLROnPlateau", + { + "optimizer": "REQUIRED", + "mode": "min", + "factor": 0.1, + "patience": 10.0, + "threshold": 0.0001, + "threshold_mode": "rel", + "cooldown": 0.0, + "min_lr": 0.0, + "eps": 1e-08 + } + ], + "pt_api": [ + "optim.lr_scheduler.ReduceLROnPlateau", + { + "optimizer": "REQUIRED", + "mode": "min", + "factor": 0.1, + "patience": 10.0, + "threshold": 0.0001, + "threshold_mode": "rel", + "cooldown": 0.0, + "min_lr": 0.0, + "eps": 1e-08, + "verbose": false + } + ], + "ms2pt_mapping": { + "optimizer": "optimizer", + "mode": "mode", + "factor": "factor", + "patience": "patience", + "threshold": "threshold", + "threshold_mode": "threshold_mode", + "cooldown": "cooldown", + "min_lr": "min_lr", + "eps": "eps" + }, + "gen_explicit_map": null + }, + "optim.lr_scheduler.SequentialLR": { + "ms_api": [ + "optim.lr_scheduler.SequentialLR", + { + "optimizer": "REQUIRED", + "schedulers": "REQUIRED", + "milestones": "REQUIRED", + "last_epoch": -1 + } + ], + "pt_api": [ + "optim.lr_scheduler.SequentialLR", + { + "optimizer": "REQUIRED", + "schedulers": "REQUIRED", + "milestones": 
"REQUIRED", + "last_epoch": -1.0, + "verbose": false + } + ], + "ms2pt_mapping": { + "optimizer": "optimizer", + "schedulers": "schedulers", + "milestones": "milestones", + "last_epoch": "last_epoch" + }, + "gen_explicit_map": null + } +} \ No newline at end of file diff --git a/mindconverter/mindconverter/modeltest/GoogleNet.py b/mindconverter/mindconverter/modeltest/GoogleNet.py new file mode 100644 index 0000000000000000000000000000000000000000..e88cd6fb2be41fb47b5903292a8bcb00bcf9acd7 --- /dev/null +++ b/mindconverter/mindconverter/modeltest/GoogleNet.py @@ -0,0 +1,281 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +''' + CNN经典网络结构复现:LeNet5、AlexNet、VGG、ResNet、GoogleNet、InceptionNet等 + GoogleNet V1版本使用的是Inception V1模块,是在Inception版本上升级,加入了1x1的卷积层,目的是减少参数和计算量。 + 该版本比naive inception多了三个1x1的卷积层 + 在第一个分支branch1上不做改变 + 在第二个分支branch2上先经过一个1x1的卷积层,然后再经过3x3的卷积层。 + 在第三个分支branch3上也要先经过一个1x1的卷积层,然后再经过5x5的卷积层。 + 在第四个分支branch4上先经过一个3x3的max pooling ,然后再使用1x1的卷积层进行降维。 + Inception V1模块结构: + + 特征拼接 + / / \ \ + 1x1 conv 3x3 conv 5x5 conv 1x1 conv + | | | | + | 1x1 conv 1x1 conv 3x3 max pooling + \ \ / / + 上一层 + + 四个分支,分别做卷积,然后拼接输出。 + GoogleNet类 + +''' + + +# 定义一个基础的卷积类,包含一个卷积层和一个ReLu激活层,正向传播函数 +class BasicConv2d(nn.Module): + def __init__(self, in_channels, out_channels, **kwargs): + """ + :param in_channels: 输入channels + :param out_channels: 输出的channels + :param kwargs: **kwargs 允许你将不定长度的键值对, 作为参数传递给一个函数。 如果你想要在一个函数里处理带名字的参数, 你应该使用**kwargs + """ + super(BasicConv2d, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, **kwargs) + self.relu = nn.ReLU(inplace=True) # inplace-选择是否进行覆盖运算 + + def forward(self, x): + x = self.conv(x) + x = self.relu(x) + return x + + +# 定义Inception模块 +class Inception(nn.Module): + def __init__(self, in_channels, ch1x1, ch3x3_reduce, ch3x3, ch5x5_reduce, ch5x5, pool_proj): + """ + :param in_channels: 输入的深度 + :param ch1x1: 第一个分支的1x1的输出 + :param ch3x3_reduce: 第二个分支的1x1卷积的输出 + :param ch3x3:第二个分支的3x3的输出 + :param 
ch5x5_reduce:第三个分支的1x1的输出 + :param ch5x5:第三个分支的5x5的输出 + :param pool_proj:第四个分支的输出 + """ + super(Inception, self).__init__() + # 第一个分支 + self.branch1 = BasicConv2d(in_channels, ch1x1, kernel_size=1) + # 第二个分支 + self.branch2 = nn.Sequential( + # 第一个是1x1的卷积层 + BasicConv2d(in_channels, ch3x3_reduce, kernel_size=1), + # 第二个是3x3的卷积层,需要padding=1,保证输出的w*h不变 + BasicConv2d(ch3x3_reduce, ch3x3, kernel_size=3, padding=1) + ) + # 第三个分支 + self.branch3 = nn.Sequential( + # 第一个是1x1的卷积层,目的是降维 + BasicConv2d(in_channels, ch5x5_reduce, kernel_size=1), + # 第二个是5x5的卷积层,需要padding=2,保证输出的w*h不变 + BasicConv2d(ch5x5_reduce, ch5x5, kernel_size=5, padding=2) + ) + # 第四个分支 + self.branch4 = nn.Sequential( + # 首先要经过一个3x3的池化 + nn.MaxPool2d(kernel_size=3, stride=1, padding=1), + # 然后经经过1x1的卷积,进行降维 + nn.Conv2d(in_channels, pool_proj, kernel_size=1) + ) + + # 正向传播 + def forward(self, x): + b1 = self.branch1(x) + b2 = self.branch2(x) + b3 = self.branch3(x) + b4 = self.branch4(x) + # 将四个分支的结果,进行拼接,dim=1 表示在channel维度上进行。batchsize*channel*width*height + out = [b1, b2, b3, b4] + return torch.cat(out, dim=1) + + +# 辅助分类器设计 +class InceptionAux(nn.Module): + def __init__(self, in_channels, num_classes): + super(InceptionAux, self).__init__() + # 输入:in_channels*14*14 输出:in_channels*4*4 + # self.avg_pool = nn.AvgPool2d(kernel_size=5,stride=3) + # 或者试用自定义池化,固定输出尺寸 + self.avg_pool = nn.AdaptiveAvgPool2d((4, 4)) + # 经过1x1的卷积,进行降维 + self.conv = BasicConv2d(in_channels, 128, kernel_size=1) + # 定义一个relu + self.relu = nn.ReLU(inplace=True) + # 定义一个dropout + self.dropout = nn.Dropout(0.5) + # 第一个全连接 128*4*4 + self.fc1 = nn.Linear(2048, 1024) + # 第二个全连接 + self.fc2 = nn.Linear(1024, num_classes) + + # 正向传播 + def forward(self, x): + # 辅助分类器aux1 是从inception(4a)处分支:N*512*14*14 + # 辅助分类器aux2 是从inception(4d)处分支:N*528*14*14 + x = self.avg_pool(x) + # aux1:N*512*4*4 aux2:N*528*4*4 + # 使用1x1的卷积层进行降维到128 + print('aux+++', x.size()) + x = self.conv(x) + print('aux----',x.size()) + # N*128*4*4 + x = torch.flatten(x, 1) + x = 
self.relu(x) + # N*2048 + x = self.fc1(x) + # 使用nn.functional里面的函数 + x = F.dropout(x, 0.5, training=self.training) + # x = self.dropout(x) + # N*1024 + x = self.fc2(x) + # N*1000(num_classes) + return x + + +# GoogleNet主体类设计 +class GoogleNet(nn.Module): + def __init__(self, num_classes=1000, aux_logits=False, init_weights=None): + """ + :param num_classes: 分类数 + :param aux_logits: 是否采用辅助分类器,默认是 + :param init_weights: 初始化权重 + """ + super(GoogleNet, self).__init__() + # 分类数 + self.num_classes = num_classes + # 是否采用辅助分类器 + self.aux_logits = aux_logits + # 输入:N*3*224*224 输出:N*64*112*112 + self.conv1 = BasicConv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=3) + # ceil_mode=True 表示向上取整,默认是false向下取整 + # 输入:N*64*112*112 输出:N*64*56*56 + self.max_pool1 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) + + # 输入:N*64*56*56 输出:N*64*56*56 + self.conv2 = BasicConv2d(64, 64, kernel_size=1) + # 输入:N*64*56*56 输出:N*192*56*56 + self.conv3 = BasicConv2d(64, 192, kernel_size=3, padding=1) + # 池化输入:N*192*56*56 输出:N*192*28*28 + self.max_pool2 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) + + # 接下来是Inception模块,将表格中对应的参数放进去就行了 + # 输入:N*192*28*28 -> N*256*28*28 + self.inception3a = Inception(192, 64, 96, 128, 16, 32, 32) + # 输入:N*256*28*28 -> N*480*28*28 + # 256=64+128+32+32 -> 480=128+192+96+64 以下都是类似的计算 + self.inception3b = Inception(256, 128, 128, 192, 32, 96, 64) + # 最大池化 N*480*28*28 -> N*480*14*14 + self.max_pool3 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) + + # N*480*14*14 -> N*512*14*14 + self.inception4a = Inception(480, 192, 96, 208, 16, 48, 64) + # N*512*14*14 -> N*512*14*14 + self.inception4b = Inception(512, 160, 112, 224, 24, 64, 64) + # N*512*14*14 -> N*512*14*14 + self.inception4c = Inception(512, 128, 128, 256, 24, 64, 64) + # N*512*14*14 -> N*528*14*14 + self.inception4d = Inception(512, 112, 144, 288, 32, 64, 64) + # N*528*14*14 -> N*832*14*14 + self.inception4e = Inception(528, 256, 160, 320, 32, 128, 128) + # 最大池化 
N*832*14*14 -> N*832*7*7 + self.max_pool4 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) + + # N*832*7*7 -> N*832*7*7 + self.inception5a = Inception(832, 256, 160, 320, 32, 128, 128) + # N*832*7*7 -> N*832*7*7 + self.inception5b = Inception(832, 384, 192, 384, 48, 128, 128) + + # 是否使用辅助分类器 + if aux_logits: + self.aux1 = InceptionAux(512, self.num_classes) + self.aux2 = InceptionAux(528, self.num_classes) + else: + self.aux1 = None + self.aux2 = None + + # 自定义池化 + self.avg_pool = nn.AdaptiveAvgPool2d((1, 1)) + # 使用dropout + self.dropout = nn.Dropout(0.5) + # 全连接输出 + self.fc = nn.Linear(1024, self.num_classes) + + # 初始化权重 + # if init_weights: + # self._init_weights() + + # 前向传播 + def forward(self, x): + # N*3*224*224 + x = self.conv1(x) + # N*64*112*112 + x = self.max_pool1(x) + print('pool1',x.size()) + # N*64*56*56 + x = self.conv2(x) + # N*64*56*56 + x = self.conv3(x) + # N*192*56*56 + x = self.max_pool2(x) + print('pool2', x.size()) + # N*192*28*28 + x = self.inception3a(x) + print('3a', x.size()) + # N*256*28*28 + x = self.inception3b(x) + # N*480*28*28 + x = self.max_pool3(x) + # N*480*14*14 + x = self.inception4a(x) + + # N*512*14*14 + # 是否使用辅助分类器 同时是否是训练模式 + if self.aux1 is not None and self.training: + aux1 = self.aux1(x) + + x = self.inception4b(x) + # N*512*14*14 + x = self.inception4c(x) + # N*512*14*14 + x = self.inception4d(x) + + # N*528*14*14 + # 是否使用辅助分类器 同时是否是训练模式 + if self.aux2 is not None and self.training: + aux2 = self.aux2(x) + + x = self.inception4e(x) + # N*832*14*14 + x = self.max_pool4(x) + # N*832*7*7 + x = self.inception5a(x) + # N*832*7*7 + x = self.inception5b(x) + # N*1024*7*7 + x = self.avg_pool(x) + # 展平操作 + # x = torch.flatten(x, 1) + x = x.view(x.size(0), -1) + print(x.size()) + # N*1024 + x = self.dropout(x) + # 全连接 + x = self.fc(x) + # N*1000(num_classes) + if self.training and self.aux_logits: + return x, aux1, aux2 + return x + + # 初始化权重 + def _init_weights(self): + for m in self.modules(): + if 
isinstance(m,nn.Conv2d): + nn.init.kaiming_normal_(m.weight,mode='fan_out',nonlinearity='relu') + if m.bias is not None: + nn.init.constant_(m.bias,0) + elif isinstance(m,nn.Linear): + nn.init.normal_(m.weight,0,0.01) + nn.init.constant_(m.bias,0) \ No newline at end of file diff --git a/mindconverter/mindconverter/modeltest/covLstm.py b/mindconverter/mindconverter/modeltest/covLstm.py new file mode 100644 index 0000000000000000000000000000000000000000..5ef44d95cf9d0c6ab3c7006b5ba66a267ec18d60 --- /dev/null +++ b/mindconverter/mindconverter/modeltest/covLstm.py @@ -0,0 +1,191 @@ +import torch.nn as nn +import torch + + +class ConvLSTMCell(nn.Module): + + def __init__(self, input_dim, hidden_dim, kernel_size, bias): + """ + Initialize ConvLSTM cell. + + Parameters + ---------- + input_dim: int + Number of channels of input tensor. + hidden_dim: int + Number of channels of hidden state. + kernel_size: (int, int) + Size of the convolutional kernel. + bias: bool + Whether or not to add the bias. 
+ """ + + super(ConvLSTMCell, self).__init__() + + self.input_dim = input_dim + self.hidden_dim = hidden_dim + + self.kernel_size = kernel_size + self.padding = kernel_size[0] // 2, kernel_size[1] // 2 + self.bias = bias + + self.conv = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim, + out_channels=4 * self.hidden_dim, + kernel_size=self.kernel_size, + padding=self.padding, + bias=self.bias) + + def forward(self, input_tensor, cur_state): + h_cur, c_cur = cur_state + + combined = torch.cat([input_tensor, h_cur], dim=1) # concatenate along channel axis + + combined_conv = self.conv(combined) + cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1) + i = torch.sigmoid(cc_i) + f = torch.sigmoid(cc_f) + o = torch.sigmoid(cc_o) + g = torch.tanh(cc_g) + + c_next = f * c_cur + i * g + h_next = o * torch.tanh(c_next) + + return h_next, c_next + + def init_hidden(self, batch_size, image_size): + height, width = image_size + return (torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device), + torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device)) + + +class ConvLSTM(nn.Module): + + """ + + Parameters: + input_dim: Number of channels in input + hidden_dim: Number of hidden channels + kernel_size: Size of kernel in convolutions + num_layers: Number of LSTM layers stacked on each other + batch_first: Whether or not dimension 0 is the batch or not + bias: Bias or no bias in Convolution + return_all_layers: Return the list of computations for all layers + Note: Will do same padding. + + Input: + A tensor of size B, T, C, H, W or T, B, C, H, W + Output: + A tuple of two lists of length num_layers (or length 1 if return_all_layers is False). 
+ 0 - layer_output_list is the list of lists of length T of each output + 1 - last_state_list is the list of last states + each element of the list is a tuple (h, c) for hidden state and memory + Example: + >> x = torch.rand((32, 10, 64, 128, 128)) + >> convlstm = ConvLSTM(64, 16, 3, 1, True, True, False) + >> _, last_states = convlstm(x) + >> h = last_states[0][0] # 0 for layer index, 0 for h index + """ + + def __init__(self, input_dim, hidden_dim, kernel_size, num_layers, + batch_first=False, bias=True, return_all_layers=False): + super(ConvLSTM, self).__init__() + + self._check_kernel_size_consistency(kernel_size) + + # Make sure that both `kernel_size` and `hidden_dim` are lists having len == num_layers + kernel_size = self._extend_for_multilayer(kernel_size, num_layers) + hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers) + if not len(kernel_size) == len(hidden_dim) == num_layers: + raise ValueError('Inconsistent list length.') + + self.input_dim = input_dim + self.hidden_dim = hidden_dim + self.kernel_size = kernel_size + self.num_layers = num_layers + self.batch_first = batch_first + self.bias = bias + self.return_all_layers = return_all_layers + + cell_list = [] + for i in range(0, self.num_layers): + cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i - 1] + + cell_list.append(ConvLSTMCell(input_dim=cur_input_dim, + hidden_dim=self.hidden_dim[i], + kernel_size=self.kernel_size[i], + bias=self.bias)) + + self.cell_list = nn.ModuleList(cell_list) + + def forward(self, input_tensor, hidden_state=None): + """ + + Parameters + ---------- + input_tensor: todo + 5-D Tensor either of shape (t, b, c, h, w) or (b, t, c, h, w) + hidden_state: todo + None. 
todo implement stateful + + Returns + ------- + last_state_list, layer_output + """ + if not self.batch_first: + # (t, b, c, h, w) -> (b, t, c, h, w) + input_tensor = input_tensor.permute(1, 0, 2, 3, 4) + + b, _, _, h, w = input_tensor.size() + + # Implement stateful ConvLSTM + if hidden_state is not None: + raise NotImplementedError() + else: + # Since the init is done in forward. Can send image size here + hidden_state = self._init_hidden(batch_size=b, + image_size=(h, w)) + + layer_output_list = [] + last_state_list = [] + + seq_len = input_tensor.size(1) + cur_layer_input = input_tensor + + for layer_idx in range(self.num_layers): + + h, c = hidden_state[layer_idx] + output_inner = [] + for t in range(seq_len): + h, c = self.cell_list[layer_idx](input_tensor=cur_layer_input[:, t, :, :, :], + cur_state=[h, c]) + output_inner.append(h) + + layer_output = torch.stack(output_inner, dim=1) + cur_layer_input = layer_output + + layer_output_list.append(layer_output) + last_state_list.append([h, c]) + + if not self.return_all_layers: + layer_output_list = layer_output_list[-1:] + last_state_list = last_state_list[-1:] + + return layer_output_list, last_state_list + + def _init_hidden(self, batch_size, image_size): + init_states = [] + for i in range(self.num_layers): + init_states.append(self.cell_list[i].init_hidden(batch_size, image_size)) + return init_states + + @staticmethod + def _check_kernel_size_consistency(kernel_size): + if not (isinstance(kernel_size, tuple) or + (isinstance(kernel_size, list) and all([isinstance(elem, tuple) for elem in kernel_size]))): + raise ValueError('`kernel_size` must be tuple or list of tuples') + + @staticmethod + def _extend_for_multilayer(param, num_layers): + if not isinstance(param, list): + param = [param] * num_layers + return param \ No newline at end of file diff --git a/mindconverter/mindconverter/modeltest/densenet.py b/mindconverter/mindconverter/modeltest/densenet.py new file mode 100644 index 
0000000000000000000000000000000000000000..1c8a605b98adbed07c2ae464615b716588de3028 --- /dev/null +++ b/mindconverter/mindconverter/modeltest/densenet.py @@ -0,0 +1,103 @@ +import re +import torch +import torch.nn as nn +import torch.nn.functional as F +from collections import OrderedDict + +class _DenseLayer(nn.Sequential): + def __init__(self, num_input_features, growth_rate, bn_size, drop_rate): + super(_DenseLayer, self).__init__() + self.add_module('norm1', nn.BatchNorm2d(num_input_features)), + self.add_module('relu1', nn.ReLU(inplace=True)), + self.add_module('conv1', nn.Conv2d(num_input_features, bn_size * + growth_rate, kernel_size=1, stride=1, bias=False)), + self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)), + self.add_module('relu2', nn.ReLU(inplace=True)), + self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate, + kernel_size=3, stride=1, padding=1, bias=False)), + self.drop_rate = drop_rate + + def forward(self, x): + new_features = super(_DenseLayer, self).forward(x) + if self.drop_rate > 0: + new_features = F.dropout(new_features, p=self.drop_rate, training=self.training) + return torch.cat([x, new_features], 1) + +class _DenseBlock(nn.Sequential): + def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate): + super(_DenseBlock, self).__init__() + for i in range(num_layers): + layer = _DenseLayer(num_input_features + i * growth_rate, growth_rate, bn_size, drop_rate) + self.add_module('denselayer%d' % (i + 1), layer) + + +class _Transition(nn.Sequential): + def __init__(self, num_input_features, num_output_features): + super(_Transition, self).__init__() + self.add_module('norm', nn.BatchNorm2d(num_input_features)) + self.add_module('relu', nn.ReLU(inplace=True)) + self.add_module('conv', nn.Conv2d(num_input_features, num_output_features, + kernel_size=1, stride=1, bias=False)) + self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2)) + + +class DenseNet(nn.Module): + r"""Densenet-BC model 
class, based on + `"Densely Connected Convolutional Networks" `_ + Args: + growth_rate (int) - how many filters to add each layer (`k` in paper) + block_config (list of 4 ints) - how many layers in each pooling block + num_init_features (int) - the number of filters to learn in the first convolution layer + bn_size (int) - multiplicative factor for number of bottle neck layers + (i.e. bn_size * k features in the bottleneck layer) + drop_rate (float) - dropout rate after each dense layer + num_classes (int) - number of classification classes + """ + + def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16), + num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000): + + super(DenseNet, self).__init__() + + # First convolution + self.features = nn.Sequential(OrderedDict([ + ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)), + ('norm0', nn.BatchNorm2d(num_init_features)), + ('relu0', nn.ReLU(inplace=True)), + ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)), + ])) + + # Each denseblock + num_features = num_init_features + for i, num_layers in enumerate(block_config): + block = _DenseBlock(num_layers=num_layers, num_input_features=num_features, + bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate) + self.features.add_module('denseblock%d' % (i + 1), block) + num_features = num_features + num_layers * growth_rate + if i != len(block_config) - 1: + trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2) + self.features.add_module('transition%d' % (i + 1), trans) + num_features = num_features // 2 + + # Final batch norm + self.features.add_module('norm5', nn.BatchNorm2d(num_features)) + + # Linear layer + self.classifier = nn.Linear(num_features, num_classes) + + # Official init from torch repo. 
+ for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.constant_(m.bias, 0) + + def forward(self, x): + features = self.features(x) + out = F.relu(features, inplace=True) + out = F.avg_pool2d(out, kernel_size=7, stride=1).view(features.size(0), -1) + out = self.classifier(out) + return out \ No newline at end of file diff --git a/mindconverter/mindconverter/modeltest/lenet.py b/mindconverter/mindconverter/modeltest/lenet.py new file mode 100644 index 0000000000000000000000000000000000000000..d58e51fc3c7e03a4cc07a73611ca4badc69baa04 --- /dev/null +++ b/mindconverter/mindconverter/modeltest/lenet.py @@ -0,0 +1,24 @@ +import torch.nn as nn +import torch.nn.functional as F + + +class TestLeNet(nn.Module): + """TestLeNet network.""" + def __init__(self): + self.conv1 = nn.Conv2d(3, 6, 5) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, input_x): + """Callback method.""" + out = F.relu(self.conv1(input_x)) + out = F.max_pool2d(out, 2) + out = F.relu(self.conv2(out)) + out = F.max_pool2d(out, 2) + out = out.view(out.size(0), -1) + out = F.relu(self.fc1(out)) + out = F.relu(self.fc2(out)) + out = self.fc3(out) + return out diff --git a/mindconverter/mindconverter/modeltest/model.py b/mindconverter/mindconverter/modeltest/model.py new file mode 100644 index 0000000000000000000000000000000000000000..f91f8141419b62e4fd704fa36c39c78172ad0127 --- /dev/null +++ b/mindconverter/mindconverter/modeltest/model.py @@ -0,0 +1,39 @@ +from torch import nn +from torch import optim + + +class Model(nn.Module): + def __init__(self): + super(Model, self).__init__() + self.conv1 = nn.Conv2d(1, 6, 5) + self.relu1 = nn.ReLU() + self.pool1 = nn.MaxPool2d(2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.relu2 
= nn.ReLU() + self.pool2 = nn.MaxPool2d(2) + self.fc1 = nn.Linear(256, 120) + self.relu3 = nn.ReLU() + self.fc2 = nn.Linear(120, 84) + self.relu4 = nn.ReLU() + self.fc3 = nn.Linear(84, 10) + self.relu5 = nn.ReLU() + + def forward(self, x): + y = self.conv1(x) + y = self.relu1(y) + y = self.pool1(y) + y = self.conv2(y) + y = self.relu2(y) + y = self.pool2(y) + y = y.view(y.shape[0], -1) + y = self.fc1(y) + y = self.relu3(y) + y = self.fc2(y) + y = self.relu4(y) + y = self.fc3(y) + y = self.relu5(y) + y.addmv(1,2,1,1) + opti = optim.Adadelta(y,0.9,0.5,0.5,0.6) + opti2 = optim.Adagrad(y,0.9,0.5,0.5,0.6) + opti3 = optim.lr_scheduler.CosineAnnealingLR(opti, 100, 0.5, 0, False) + return y diff --git a/mindconverter/mindconverter/modeltest/ninnet.py b/mindconverter/mindconverter/modeltest/ninnet.py new file mode 100644 index 0000000000000000000000000000000000000000..ed6f473f6c9363030cd074e7f7389b7050bd7a77 --- /dev/null +++ b/mindconverter/mindconverter/modeltest/ninnet.py @@ -0,0 +1,121 @@ +import torch +import torch.nn as nn + +""" + CNN经典网络结构复现:LeNet5、AlexNet、VGG、ResNet、InceptionNet等 + 本文实现NinNet 全称:Network in Network + 该网络是2014年发表的,主要有两个创新点: + 1.使用了MLP Convolution layers 卷积层 + 传统的CNN的卷积滤波器是底层数据块的广义线性模型(generalized linear model)GLM + 在这篇论文中使用了MLPConv 代替了GLM + MLPConv 是在常规卷积后面接入若干个1x1卷积,每个特征图视为一个神经元特征图。 + 特征图通过通过多个1x1的卷积就类似多个神经元线性组合,这样就像是MLP(多层感知器)了 + + 2.Global Average Pooling (全局平均池化) + 论文中提出使用全局平均池化代替全连接层,对最后一层的特征图进行全局平均池化,得到的结果向量直接输入到softmax层。 + 全局平均池取代完全连通层上的一个优点是,通过增强特征映射和类别之间的对应关系,它更适合于卷积结构。 + 因此,特征映射可以很容易地解释为类别信任映射。另一个优点是在全局平均池中没有优化参数,从而避免了这一层的过度拟合。 + 此外,全局平均池综合了空间信息,从而对输入的空间平移具有更强的鲁棒性。 + 重点关注:该文章是第一个使用1x1卷积的,可以实现跨通道特征融合和通道的升维降维,减少网络参数 + + 网络结构参考网上给定的参数,原论文没有给出参数: + 网络包括三个mlpconv层的nin模块和一个全局平均池化,在每个mlpconv层中,有一个三层感知器。 + 第一个nin模块: + 第一个常规卷积输入:3x224x224 kernel_size = 11 ,output=96,stride=4 输出:96x54x54 + 第二个多层感知器输入:96x54x54 kernel_size = 1 输出:96x54x54 + 第三个多层感知器输入:96x54x54 kernel_size = 1 输出:96x54x54 + 后跟一个maxpool kernel_size= 3 stride=2 输入:96x54x54 输出:96x26x26 + 
第二个nin模块: + 第一个常规卷积输入:96x26x26 kernel_size = 5 ,output=256,padding=2,stride=1 输出:256x26x26 + 第二个多层感知器输入:256x26x26 kernel_size = 1 输出:256x26x26 + 第三个多层感知器输入:256x26x26 kernel_size = 1 输出:256x26x26 + 后跟一个maxpool kernel_size= 3 stride=2 输入:256x26x26 输出:256x12x12 + 第三个nin模块: + 第一个常规卷积输入:256x12x12 kernel_size = 3 ,output=384,padding=1,stride=1 输出:384x12x12 + 第二个多层感知器输入:384x12x12 kernel_size = 1 输出:384x12x12 + 第三个多层感知器输入:384x12x12 kernel_size = 1 输出:384x12x12 + 后跟一个maxpool kernel_size= 3 stride=2 输入:384x12x12 输出:384x5x5 + 第四个nin模块: + 第一个常规卷积输入:384x5x5 kernel_size = 3 ,output=num_class(分类数),padding=1,stride=1 输出:num_classx5x5 + 第二个多层感知器输入:num_classx5x5 kernel_size = 1 输出:num_classx5x5 + 第三个多层感知器输入:num_classx5x5 kernel_size = 1 输出:num_classx5x5 + 全局平均池化 + AdaptiveAvgPool2d((1,1)) +""" + + +# 定义nin模块 +def nin_block(in_channel, out_channel, kernel_size, stride, padding=0): + """ + :param in_channel: 输入通道 + :param out_channel: 输出通道 + :param kernel_size: 卷积核大小 + :param stride: 步长 + :param padding: 填充 + :return: + """ + blk = nn.Sequential( + # 第一个卷积是常规卷积 + nn.Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=kernel_size, stride=stride, + padding=padding), + # 激活函数 + nn.ReLU(), + # 1x1卷积 + nn.Conv2d(in_channels=out_channel, out_channels=out_channel, kernel_size=1), + # 激活函数 + nn.ReLU(), + # 1x1卷积 + nn.Conv2d(in_channels=out_channel, out_channels=out_channel, kernel_size=1), + # 激活函数 + nn.ReLU() + ) + return blk + + +# 展开 + +# NinNet网络结构 +class NinNet(nn.Module): + def __init__(self, num_class=1000): + super(NinNet, self).__init__() + self.num_class = num_class + # 第一个nin模块输入:3x224x224 输出:96x54x54 + self.nin1 = nin_block(in_channel=3, out_channel=96, kernel_size=11, stride=4) + # 第一个最大池化,输入:96x54x54 输出:96x26x26 + self.max_pool1 = nn.MaxPool2d(kernel_size=3,stride=2) + # 第二个nin模块,输入:96x26x26 输出:256x26x26 + self.nin2 = nin_block(in_channel=96,out_channel=256,kernel_size=5,stride=1,padding=2) + # 第二个最大池化:输入:256x26x26 输出:256x12x12 + self.max_pool2 = 
nn.MaxPool2d(kernel_size=3,stride=2) + # 第三个nin模块,输入:256x12x12 输出:384x12x12 + self.nin3 = nin_block(in_channel=256,out_channel=384,kernel_size=3,stride=1,padding=1) + # 第三个最大池化,输入:384x12x12 输出:384x5x5 + self.max_pool3 = nn.MaxPool2d(kernel_size=3,stride=2) + # 第四个nin模块:输入:384x5x5 输出:num_classx5x5 + self.nin4 = nin_block(in_channel=384,out_channel=self.num_class,kernel_size=3,stride=1,padding=1) + # 加上一个dropout层 + self.dropout = nn.Dropout(0.5) + # 全局平均池化 + self.gap = nn.AdaptiveAvgPool2d((1,1)) + + def forward(self,x): + # nin模块 + x = self.nin1(x) + # 最大池化 + x = self.max_pool1(x) + # nin模块 + x = self.nin2(x) + # 最大池化 + x = self.max_pool2(x) + # nin模块 + x = self.nin3(x) + # 最大池化 + x = self.max_pool3(x) + # nin模块 + x = self.nin4(x) + # 全局平均池化GAP + x = self.gap(x) + # 展开 + x = x.view(x.size(0),-1) + + return x \ No newline at end of file diff --git a/mindconverter/mindconverter/modeltest/resnet.py b/mindconverter/mindconverter/modeltest/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..acd8c44df8d2aabef83722c70a2faf0db4d8d6aa --- /dev/null +++ b/mindconverter/mindconverter/modeltest/resnet.py @@ -0,0 +1,139 @@ +import torch.nn as nn + +def conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=1, bias=False) + + +def conv1x1(in_planes, out_planes, stride=1): + """1x1 convolution""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = 
self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(Bottleneck, self).__init__() + self.conv1 = conv1x1(inplanes, planes) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = conv3x3(planes, planes, stride) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = conv1x1(planes, planes * self.expansion) + self.bn3 = nn.BatchNorm2d(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class ResNet(nn.Module): + + def __init__(self, block, layers, num_classes=1000): + super(ResNet, self).__init__() + self.inplanes = 64 + self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, + bias=False) + self.bn1 = nn.BatchNorm2d(64) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) 
+ nn.init.constant_(m.bias, 0) + + def _make_layer(self, block, planes, blocks, stride=1): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + + return x \ No newline at end of file diff --git a/mindconverter/mindconverter/modeltest/transformers.py b/mindconverter/mindconverter/modeltest/transformers.py new file mode 100644 index 0000000000000000000000000000000000000000..afdde9aaa93b3cbc635fbc795dda3bb8de14e698 --- /dev/null +++ b/mindconverter/mindconverter/modeltest/transformers.py @@ -0,0 +1,243 @@ +import math +import torch +import numpy as np +import torch.nn as nn +import torch.optim as optim +# S: Symbol that shows starting of decoding input +# E: Symbol that shows starting of decoding output +# P: Symbol that will fill in blank sequence if current batch data size is short than time steps +sentences = [ + # enc_input dec_input dec_output + ['ich mochte ein bier P', 'S i want a beer .', 'i want a beer . E'], + ['ich mochte ein cola P', 'S i want a coke .', 'i want a coke . E'] +] + +# Padding Should be Zero +src_vocab = {'P' : 0, 'ich' : 1, 'mochte' : 2, 'ein' : 3, 'bier' : 4, 'cola' : 5} +src_vocab_size = len(src_vocab) + +tgt_vocab = {'P' : 0, 'i' : 1, 'want' : 2, 'a' : 3, 'beer' : 4, 'coke' : 5, 'S' : 6, 'E' : 7, '.' 
: 8} +idx2word = {i: w for i, w in enumerate(tgt_vocab)} +tgt_vocab_size = len(tgt_vocab) + +src_len = 5 # enc_input max sequence length +tgt_len = 6 # dec_input(=dec_output) max sequence length + +# Transformer Parameters +d_model = 512 # Embedding Size +d_ff = 2048 # FeedForward dimension +d_k = d_v = 64 # dimension of K(=Q), V +n_layers = 6 # number of Encoder of Decoder Layer +n_heads = 8 # number of heads in Multi-Head Attention + +class PositionalEncoding(nn.Module): + def __init__(self, d_model, dropout=0.1, max_len=5000): + super(PositionalEncoding, self).__init__() + self.dropout = nn.Dropout(p=dropout) + + pe = torch.zeros(max_len, d_model) + position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1) + div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)) + pe[:, 0::2] = torch.sin(position * div_term) + pe[:, 1::2] = torch.cos(position * div_term) + pe = pe.unsqueeze(0).transpose(0, 1) + self.register_buffer('pe', pe) + + def forward(self, x): + ''' + x: [seq_len, batch_size, d_model] + ''' + x = x + self.pe[:x.size(0), :] + return self.dropout(x) + +def get_attn_pad_mask(seq_q, seq_k): + ''' + seq_q: [batch_size, seq_len] + seq_k: [batch_size, seq_len] + seq_len could be src_len or it could be tgt_len + seq_len in seq_q and seq_len in seq_k maybe not equal + ''' + batch_size, len_q = seq_q.size() + batch_size, len_k = seq_k.size() + # eq(zero) is PAD token + pad_attn_mask = seq_k.data.eq(0).unsqueeze(1) # [batch_size, 1, len_k], False is masked + return pad_attn_mask.expand(batch_size, len_q, len_k) # [batch_size, len_q, len_k] + +def get_attn_subsequence_mask(seq): + ''' + seq: [batch_size, tgt_len] + ''' + attn_shape = [seq.size(0), seq.size(1), seq.size(1)] + subsequence_mask = np.triu(np.ones(attn_shape), k=1) # Upper triangular matrix + subsequence_mask = torch.from_numpy(subsequence_mask).byte() + return subsequence_mask # [batch_size, tgt_len, tgt_len] + +class ScaledDotProductAttention(nn.Module): + 
# Hyper-parameters re-declared here with the same values as the module
# preamble so this section is self-contained.
d_model = 512
d_ff = 2048
d_k = d_v = 64
n_layers = 6
n_heads = 8


class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention: softmax(Q K^T / sqrt(d_k)) V."""

    def __init__(self):
        super(ScaledDotProductAttention, self).__init__()

    def forward(self, Q, K, V, attn_mask):
        '''
        Q: [batch_size, n_heads, len_q, d_k]
        K: [batch_size, n_heads, len_k, d_k]
        V: [batch_size, n_heads, len_v(=len_k), d_v]
        attn_mask: [batch_size, n_heads, seq_len, seq_len]; True = masked out
        '''
        scores = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(d_k)  # [b, h, len_q, len_k]
        # Masked positions get -1e9 so softmax drives their weight to ~0.
        scores.masked_fill_(attn_mask, -1e9)

        attn = nn.Softmax(dim=-1)(scores)
        context = torch.matmul(attn, V)  # [batch_size, n_heads, len_q, d_v]
        return context, attn


class MultiHeadAttention(nn.Module):
    """Multi-head attention with residual connection and LayerNorm."""

    def __init__(self):
        super(MultiHeadAttention, self).__init__()
        self.W_Q = nn.Linear(d_model, d_k * n_heads, bias=False)
        self.W_K = nn.Linear(d_model, d_k * n_heads, bias=False)
        self.W_V = nn.Linear(d_model, d_v * n_heads, bias=False)
        self.fc = nn.Linear(n_heads * d_v, d_model, bias=False)
        # FIX: the original constructed `nn.LayerNorm(d_model).cuda()` inside
        # forward(), creating fresh *untrained* parameters on every call and
        # hard-requiring CUDA.  Registering the norm here makes it a proper
        # trainable submodule that follows the parent across devices.
        self.norm = nn.LayerNorm(d_model)

    def forward(self, input_Q, input_K, input_V, attn_mask):
        '''
        input_Q: [batch_size, len_q, d_model]
        input_K: [batch_size, len_k, d_model]
        input_V: [batch_size, len_v(=len_k), d_model]
        attn_mask: [batch_size, seq_len, seq_len]
        '''
        residual, batch_size = input_Q, input_Q.size(0)
        # (B, S, D) -proj-> (B, S, D_new) -split-> (B, S, H, W) -trans-> (B, H, S, W)
        Q = self.W_Q(input_Q).view(batch_size, -1, n_heads, d_k).transpose(1, 2)  # [b, h, len_q, d_k]
        K = self.W_K(input_K).view(batch_size, -1, n_heads, d_k).transpose(1, 2)  # [b, h, len_k, d_k]
        V = self.W_V(input_V).view(batch_size, -1, n_heads, d_v).transpose(1, 2)  # [b, h, len_v, d_v]

        # Replicate the mask for every head: [batch_size, n_heads, seq, seq].
        attn_mask = attn_mask.unsqueeze(1).repeat(1, n_heads, 1, 1)

        # context: [b, h, len_q, d_v], attn: [b, h, len_q, len_k]
        context, attn = ScaledDotProductAttention()(Q, K, V, attn_mask)
        context = context.transpose(1, 2).reshape(batch_size, -1, n_heads * d_v)  # [b, len_q, h*d_v]
        output = self.fc(context)  # [batch_size, len_q, d_model]
        return self.norm(output + residual), attn


class PoswiseFeedForwardNet(nn.Module):
    """Position-wise feed-forward block with residual connection + LayerNorm."""

    def __init__(self):
        super(PoswiseFeedForwardNet, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(d_model, d_ff, bias=False),
            nn.ReLU(),
            nn.Linear(d_ff, d_model, bias=False)
        )
        # FIX: same issue as MultiHeadAttention — the LayerNorm used to be
        # re-created (on CUDA) at every forward call.
        self.norm = nn.LayerNorm(d_model)

    def forward(self, inputs):
        '''
        inputs: [batch_size, seq_len, d_model] -> same shape
        '''
        residual = inputs
        output = self.fc(inputs)
        return self.norm(output + residual)


class EncoderLayer(nn.Module):
    """One encoder block: self-attention followed by the feed-forward net."""

    def __init__(self):
        super(EncoderLayer, self).__init__()
        self.enc_self_attn = MultiHeadAttention()
        self.pos_ffn = PoswiseFeedForwardNet()

    def forward(self, enc_inputs, enc_self_attn_mask):
        '''
        enc_inputs: [batch_size, src_len, d_model]
        enc_self_attn_mask: [batch_size, src_len, src_len]
        '''
        # enc_inputs serves as Q, K and V (self-attention).
        enc_outputs, attn = self.enc_self_attn(enc_inputs, enc_inputs, enc_inputs, enc_self_attn_mask)
        enc_outputs = self.pos_ffn(enc_outputs)  # [batch_size, src_len, d_model]
        return enc_outputs, attn


class DecoderLayer(nn.Module):
    """One decoder block: masked self-attention, cross-attention, feed-forward."""

    def __init__(self):
        super(DecoderLayer, self).__init__()
        self.dec_self_attn = MultiHeadAttention()
        self.dec_enc_attn = MultiHeadAttention()
        self.pos_ffn = PoswiseFeedForwardNet()

    def forward(self, dec_inputs, enc_outputs, dec_self_attn_mask, dec_enc_attn_mask):
        '''
        dec_inputs: [batch_size, tgt_len, d_model]
        enc_outputs: [batch_size, src_len, d_model]
        dec_self_attn_mask: [batch_size, tgt_len, tgt_len]
        dec_enc_attn_mask: [batch_size, tgt_len, src_len]
        '''
        dec_outputs, dec_self_attn = self.dec_self_attn(dec_inputs, dec_inputs, dec_inputs, dec_self_attn_mask)
        # Cross-attention: queries from the decoder, keys/values from the encoder.
        dec_outputs, dec_enc_attn = self.dec_enc_attn(dec_outputs, enc_outputs, enc_outputs, dec_enc_attn_mask)
        dec_outputs = self.pos_ffn(dec_outputs)  # [batch_size, tgt_len, d_model]
        return dec_outputs, dec_self_attn, dec_enc_attn


class Encoder(nn.Module):
    """Token embedding + positional encoding + a stack of n_layers EncoderLayers."""

    def __init__(self):
        super(Encoder, self).__init__()
        self.src_emb = nn.Embedding(src_vocab_size, d_model)
        self.pos_emb = PositionalEncoding(d_model)
        self.layers = nn.ModuleList([EncoderLayer() for _ in range(n_layers)])

    def forward(self, enc_inputs):
        '''
        enc_inputs: [batch_size, src_len]
        '''
        enc_outputs = self.src_emb(enc_inputs)  # [batch_size, src_len, d_model]
        # PositionalEncoding expects [seq, batch, d_model], hence the transposes.
        enc_outputs = self.pos_emb(enc_outputs.transpose(0, 1)).transpose(0, 1)
        enc_self_attn_mask = get_attn_pad_mask(enc_inputs, enc_inputs)  # [b, src_len, src_len]
        enc_self_attns = []
        for layer in self.layers:
            enc_outputs, enc_self_attn = layer(enc_outputs, enc_self_attn_mask)
            enc_self_attns.append(enc_self_attn)
        return enc_outputs, enc_self_attns


class Decoder(nn.Module):
    """Target embedding + positional encoding + a stack of n_layers DecoderLayers."""

    def __init__(self):
        super(Decoder, self).__init__()
        self.tgt_emb = nn.Embedding(tgt_vocab_size, d_model)
        self.pos_emb = PositionalEncoding(d_model)
        self.layers = nn.ModuleList([DecoderLayer() for _ in range(n_layers)])

    def forward(self, dec_inputs, enc_inputs, enc_outputs):
        '''
        dec_inputs: [batch_size, tgt_len]
        enc_inputs: [batch_size, src_len]
        enc_outputs: [batch_size, src_len, d_model]
        '''
        # FIX: use the inputs' device instead of hard-coded .cuda() so the
        # model also runs on CPU; for CUDA inputs this is unchanged.
        device = dec_inputs.device
        dec_outputs = self.tgt_emb(dec_inputs)  # [batch_size, tgt_len, d_model]
        dec_outputs = self.pos_emb(dec_outputs.transpose(0, 1)).transpose(0, 1).to(device)
        dec_self_attn_pad_mask = get_attn_pad_mask(dec_inputs, dec_inputs).to(device)
        dec_self_attn_subsequence_mask = get_attn_subsequence_mask(dec_inputs).to(device)
        # Union of padding and look-ahead masks (True where attention is forbidden).
        dec_self_attn_mask = torch.gt((dec_self_attn_pad_mask + dec_self_attn_subsequence_mask), 0)

        dec_enc_attn_mask = get_attn_pad_mask(dec_inputs, enc_inputs)  # [b, tgt_len, src_len]

        dec_self_attns, dec_enc_attns = [], []
        for layer in self.layers:
            dec_outputs, dec_self_attn, dec_enc_attn = layer(
                dec_outputs, enc_outputs, dec_self_attn_mask, dec_enc_attn_mask)
            dec_self_attns.append(dec_self_attn)
            dec_enc_attns.append(dec_enc_attn)
        return dec_outputs, dec_self_attns, dec_enc_attns


class Transformer(nn.Module):
    """Full encoder-decoder Transformer projecting onto the target vocabulary."""

    def __init__(self):
        super(Transformer, self).__init__()
        # NOTE(review): the hard-coded .cuda() placement is kept so callers
        # that rely on auto-GPU construction are unaffected; prefer
        # .to(device) on the whole model in new code.
        self.encoder = Encoder().cuda()
        self.decoder = Decoder().cuda()
        self.projection = nn.Linear(d_model, tgt_vocab_size, bias=False).cuda()

    def forward(self, enc_inputs, dec_inputs):
        '''
        enc_inputs: [batch_size, src_len]
        dec_inputs: [batch_size, tgt_len]
        Returns logits flattened to [batch_size * tgt_len, tgt_vocab_size]
        plus the per-layer attention maps.
        '''
        # enc_outputs: [b, src_len, d_model]; enc_self_attns: n_layers x [b, h, src_len, src_len]
        enc_outputs, enc_self_attns = self.encoder(enc_inputs)
        # dec_outputs: [b, tgt_len, d_model]
        dec_outputs, dec_self_attns, dec_enc_attns = self.decoder(dec_inputs, enc_inputs, enc_outputs)
        dec_logits = self.projection(dec_outputs)  # [b, tgt_len, tgt_vocab_size]
        return dec_logits.view(-1, dec_logits.size(-1)), enc_self_attns, dec_self_attns, dec_enc_attns
"torch.nn.functional.lp_pool2d", + "torch.nn.functional.margin_ranking_loss", + "torch.nn.functional.max_pool3d", + "torch.nn.functional.max_unpool1d", + "torch.nn.functional.max_unpool2d", + "torch.nn.functional.max_unpool3d", + "torch.nn.functional.mse_loss", + "torch.nn.functional.multi_margin_loss", + "torch.nn.functional.multilabel_margin_loss", + "torch.nn.functional.multilabel_soft_margin_loss", + "torch.nn.functional.nll_loss", + "torch.nn.functional.pad", + "torch.nn.functional.pdist", + "torch.nn.functional.pixel_shuffle", + "torch.nn.functional.pixel_unshuffle", + "torch.nn.functional.prelu", + "torch.nn.functional.relu", + "torch.nn.functional.relu6", + "torch.nn.functional.rrelu", + "torch.nn.functional.selu", + "torch.nn.functional.sigmoid", + "torch.nn.functional.silu", + "torch.nn.functional.softmax", + "torch.nn.functional.softmin", + "torch.nn.functional.softsign", + "torch.nn.functional.smooth_l1_loss", + "torch.nn.functional.soft_margin_loss", + "torch.nn.functional.softshrink", + "torch.nn.functional.tanh", + "torch.nn.functional.tanhshrink", + "torch.nn.functional.threshold", + "torch.nn.functional.triplet_margin_loss", + "torch.nn.functional.unfold", + "torch.nn.functional.upsample", + "torch.nn.functional.conv_transpose1d", + "torch.nn.functional.conv_transpose2d", + "torch.nn.functional.conv_transpose3d", + "torch.nn.functional.threshold_", + "torch.nn.functional.relu_", + "torch.nn.functional.hardtanh_", + "torch.nn.functional.elu_", + "torch.nn.functional.leaky_relu_", + "torch.nn.functional.rrelu_", + "torch.nn.functional.instance_norm", + "torch.nn.functional.layer_norm", + "torch.nn.functional.local_response_norm", + "torch.nn.functional.normalize", + "torch.nn.functional.alpha_dropout", + "torch.nn.functional.embedding", + "torch.nn.functional.embedding_bag", + "torch.nn.functional.one_hot", + "torch.nn.functional.pairwise_distance", + "torch.nn.functional.max_pool2d" +] \ No newline at end of file diff --git 
a/mindconverter/mindconverter/ops/nn_list.json b/mindconverter/mindconverter/ops/nn_list.json new file mode 100644 index 0000000000000000000000000000000000000000..1f0a945a8df1c41b3c435d3b5fb9995224000bdc --- /dev/null +++ b/mindconverter/mindconverter/ops/nn_list.json @@ -0,0 +1,120 @@ +[ + "nn.AdaptiveAvgPool1d", + "nn.AdaptiveAvgPool2d", + "nn.AdaptiveAvgPool3d", + "nn.AdaptiveMaxPool1d", + "nn.AdaptiveMaxPool2d", + "nn.AdaptiveMaxPool3d", + "nn.AvgPool1d", + "nn.AvgPool2d", + "nn.AvgPool3d", + "nn.BCELoss", + "nn.BCEWithLogitsLoss", + "nn.BatchNorm1d", + "nn.BatchNorm2d", + "nn.BatchNorm3d", + "nn.Bilinear", + "nn.CeLU", + "nn.ChannelShuffle", + "nn.CTCLoss", + "nn.ConstantPad1d", + "nn.ConstantPad2d", + "nn.ConstantPad3d", + "nn.Conv1d", + "nn.Conv2d", + "nn.Conv3d", + "nn.ConvTranspose1d", + "nn.ConvTranspose2d", + "nn.ConvTranspose3d", + "nn.CosineEmbeddingLoss", + "nn.CrossEntropyLoss", + "nn.Dropout", + "nn.Dropout2d", + "nn.Dropout3d", + "nn.ELU", + "nn.Flatten", + "nn.GaussianNLLLoss", + "nn.GELU", + "nn.GRU", + "nn.GRUCell", + "nn.GroupNorm", + "nn.Hardshrink", + "nn.Hardsigmoid", + "nn.Hardswish", + "nn.Hardtanh", + "nn.HingeEmbeddingLoss", + "nn.HuberLoss", + "nn.Identity", + "nn.init.uniform_", + "nn.InstanceNorm1d", + "nn.InstanceNorm2d", + "nn.InstanceNorm3d", + "nn.KLDivLoss", + "nn.L1Loss", + "nn.LayerNorm", + "nn.LeakyReLU", + "nn.LPPool1d", + "nn.LPPool2d", + "nn.LSTM", + "nn.LSTMCell", + "nn.Linear", + "nn.LocalResponseNorm", + "nn.LogSigmoid", + "nn.LogSoftMax", + "nn.MSELoss", + "nn.MarginRankingLoss", + "nn.MaxPool1d", + "nn.MaxPool2d", + "nn.MaxPool3d", + "nn.MaxUnpool1d", + "nn.MaxUnpool2d", + "nn.MaxUnpool3d", + "nn.ModuleDict", + "nn.ModuleList", + "nn.MultiheadAttention", + "nn.MultiLabelMarginLoss", + "nn.MultiLabelSoftMarginLoss", + "nn.MultiMarginLoss", + "nn.NLLLoss", + "nn.PReLU", + "nn.PixelShuffle", + "nn.PixelUnshuffle", + "nn.PoissonNLLLoss", + "nn.ReflectionPad1d", + "nn.ReflectionPad2d", + "nn.ReLU", + "nn.ReLU6", + 
"nn.ReplicationPad1d", + "nn.ReplicationPad2d", + "nn.ReplicationPad3d", + "nn.RNN", + "nn.RNNCell", + "nn.RReLU", + "nn.SeLU", + "nn.Sequential", + "nn.Sigmoid", + "nn.SiLU", + "nn.SmoothL1Loss", + "nn.SoftMarginLoss", + "nn.Softmax", + "nn.Softmin", + "nn.Softmax2d", + "nn.Softshrink", + "nn.Softsign", + "nn.SyncBatchNorm", + "nn.Tanh", + "nn.Tanhshrink", + "nn.Threshold", + "nn.Transformer", + "nn.TransformerDecoder", + "nn.TransformerEncoder", + "nn.TransformerDecoderLayer", + "nn.TransformerEncoderLayer", + "nn.TripletMarginLoss", + "nn.Unflatten", + "nn.Unfold", + "nn.Upsample", + "nn.ZeroPad2d", + "nn.utils.clip_grad_value_", + "nn.utils.clip_grad_norm_" +] \ No newline at end of file diff --git a/mindconverter/mindconverter/ops/tensor_dot_list.json b/mindconverter/mindconverter/ops/tensor_dot_list.json new file mode 100644 index 0000000000000000000000000000000000000000..9bb3279c0bacdf003aa49413627eb0f9a8bfaf77 --- /dev/null +++ b/mindconverter/mindconverter/ops/tensor_dot_list.json @@ -0,0 +1,223 @@ +[ + ".abs", + ".absolute", + ".acos", + ".acosh", + ".add", + ".addbmm", + ".addcdiv", + ".addcmul", + ".addmm", + ".addmv", + ".addr", + ".all", + ".amax", + ".amin", + ".any", + ".arccos", + ".arccosh", + ".argmax", + ".angle", + ".arcsin", + ".arcsinh", + ".arctan", + ".arctanh", + ".argmin", + ".argsort", + ".asin", + ".asinh", + ".atan", + ".atan2", + ".atanh", + ".baddbmm", + ".bernoulli", + ".bincount", + ".bitwise_and", + ".bitwise_or", + ".bitwise_xor", + ".bmm", + ".bool", + ".broadcast_to", + ".cauchy_", + ".ceil", + ".chunk", + ".cholesky", + ".cholesky_solve", + ".clamp", + ".clip", + ".conj", + ".copysign", + ".cos", + ".cosh", + ".cross", + ".cummax", + ".cummin", + ".cumprod", + ".cumsum", + ".deg2rad", + ".diag", + ".diagflat", + ".diff", + ".digamma", + ".dim", + ".div", + ".divide", + ".eq", + ".erf", + ".erfc", + ".erfinv", + ".exp", + ".expand", + ".expand_as", + ".expm1", + ".fill_diagonal_", + ".flip", + ".fliplr", + ".flipud", + 
".float", + ".float_power", + ".floor", + ".floor_divide", + ".fmax", + ".fmod", + ".frac", + ".gather", + ".ge", + ".geqrf", + ".ger", + ".greater", + ".greater_equal", + ".gt", + ".H", + ".half", + ".hardshrink", + ".heaviside", + ".histc", + ".hypot", + ".i0", + ".igamma", + ".igammac", + ".imag", + ".index_add", + ".index_fill", + ".index_put", + ".index_select", + ".inner", + ".int", + ".inverse", + ".isclose", + ".isfinite", + ".isinf", + ".isnan", + ".is_complex", + ".is_floating_point", + ".is_signed", + ".isneginf", + ".isposinf", + ".isreal", + ".item", + ".lcm", + ".ldexp", + ".le", + ".lerp", + ".less", + ".less_equal", + ".log", + ".log_normal", + ".log10", + ".log1p", + ".log2", + ".logaddexp", + ".logaddexp2", + ".logcumsumexp", + ".logdet", + ".logical_and", + ".logical_not", + ".logical_or", + ".logical_xor", + ".logit", + ".logsumexp", + ".long", + ".lt", + ".lu_solve", + ".masked_fill", + ".masked_scatter", + ".masked_select", + ".matmul", + ".max", + ".maximum", + ".mean", + ".median", + ".mH", + ".min", + ".minimum", + ".mm", + ".moveaxis", + ".movedim", + ".msort", + ".mT", + ".mul", + ".multinomial", + ".multiply", + ".mvlgamma", + ".nan_to_num", + ".nanmedian", + ".nansum", + ".narrow", + ".ndim", + ".ndimension", + ".ne", + ".neg", + ".negative", + ".nelement", + ".new_ones", + ".new_zeros", + ".nextafter", + ".norm", + ".nonzero", + ".not_equal", + ".numel", + ".numpy", + ".orgqr", + ".ormqr", + ".outer", + ".permute", + ".pow", + ".prod", + ".ravel", + ".real", + ".reciprocal", + ".remainder", + ".renorm", + ".rad2deg", + ".repeat", + ".repeat_interleave", + ".reshape", + ".reshape_as", + ".round", + ".roll", + ".rot90", + ".rsqrt", + ".scatter_", + ".sum_to_size", + ".scatter", + ".sgn", + ".short", + ".sigmoid", + ".sign", + ".signbit", + ".sin", + ".sinc", + ".sinh", + ".size", + ".slogdet", + ".sort", + ".split", + ".sqrt", + ".square", + ".squeeze", + ".std", + ".sub", + ".subtract", + ".sum" +] \ No newline at end of file diff --git 
a/mindconverter/mindconverter/ops/torch_dot_list.json b/mindconverter/mindconverter/ops/torch_dot_list.json new file mode 100644 index 0000000000000000000000000000000000000000..057edd268d098a4141a009e0663a37a1243d800f --- /dev/null +++ b/mindconverter/mindconverter/ops/torch_dot_list.json @@ -0,0 +1,274 @@ +[ + "torch.abs", + "torch.absolute", + "torch.acos", + "torch.acosh", + "torch.add", + "torch.addbmm", + "torch.addcdiv", + "torch.addcmul", + "torch.addmm", + "torch.addmv", + "torch.addr", + "torch.all", + "torch.amax", + "torch.amin", + "torch.angle", + "torch.any", + "torch.arange", + "torch.arccos", + "torch.arccosh", + "torch.arcsin", + "torch.arcsinh", + "torch.arctan", + "torch.arctanh", + "torch.argmax", + "torch.argmin", + "torch.argsort", + "torch.asin", + "torch.asinh", + "torch.atan", + "torch.atan2", + "torch.atanh", + "torch.atleast_1d", + "torch.atleast_2d", + "torch.atleast_3d", + "torch.baddbmm", + "torch.bartlett_window", + "torch.bernoulli", + "torch.bincount", + "torch.bitwise_and", + "torch.bitwise_or", + "torch.bitwise_xor", + "torch.blackman_window", + "torch.block_diag", + "torch.bmm", + "torch.bucketize", + "torch.broadcast_to", + "torch.cat", + "torch.cdist", + "torch.ceil", + "torch.cholesky", + "torch.cholesky_solve", + "torch.chunk", + "torch.clamp", + "torch.clip", + "torch.column_stack", + "torch.combinations", + "torch.conj", + "torch.copysign", + "torch.cos", + "torch.cosh", + "torch.count_nonzero", + "torch.cross", + "torch.cummax", + "torch.cummin", + "torch.cumprod", + "torch.cumsum", + "torch.deg2rad", + "torch.diag", + "torch.diag_embed", + "torch.diagflat", + "torch.diagonal", + "torch.diff", + "torch.digamma", + "torch.dist", + "torch.div", + "torch.divide", + "torch.dot", + "torch.dstack", + "torch.einsum", + "torch.eq", + "torch.erf", + "torch.erfc", + "torch.erfinv", + "torch.exp", + "torch.exp2", + "torch.expm1", + "torch.eye", + "torch.flatten", + "torch.float_power", + "torch.flip", + "torch.fliplr", + 
"torch.flipud", + "torch.floor", + "torch.floor_divide", + "torch.fmax", + "torch.fmod", + "torch.frac", + "torch.full", + "torch.full_like", + "torch.gather", + "torch.gcd", + "torch.ge", + "torch.geqrf", + "torch.ger", + "torch.greater", + "torch.greater_equal", + "torch.gt", + "torch.hann_window", + "torch.heaviside", + "torch.hstack", + "torch.histc", + "torch.hypot", + "torch.hamming_window", + "torch.i0", + "torch.igamma", + "torch.igammac", + "torch.imag", + "torch.index_select", + "torch.inner", + "torch.inverse", + "torch.isclose", + "torch.isfinite", + "torch.isinf", + "torch.isnan", + "torch.isneginf", + "torch.isposinf", + "torch.isreal", + "torch.is_complex", + "torch.is_floating_point", + "torch.is_nonzero", + "torch.is_tensor", + "torch.kaiser_window", + "torch.kron", + "torch.lcm", + "torch.ldexp", + "torch.le", + "torch.lerp", + "torch.less", + "torch.less_equal", + "torch.linalg.cond", + "torch.linalg.eigvals", + "torch.linalg.norm", + "torch.linspace", + "torch.log", + "torch.log2", + "torch.log10", + "torch.logaddexp", + "torch.logaddexp2", + "torch.logcumsumexp", + "torch.log1p", + "torch.logdet", + "torch.logical_and", + "torch.logical_not", + "torch.logical_or", + "torch.logical_xor", + "torch.logit", + "torch.logspace", + "torch.logsumexp", + "torch.lt", + "torch.lu_solve", + "torch.lu_unpack", + "torch.masked_select", + "torch.matmul", + "torch.max", + "torch.maximum", + "torch.mean", + "torch.median", + "torch.meshgrid", + "torch.mm", + "torch.mul", + "torch.min", + "torch.minimum", + "torch.msort", + "torch.moveaxis", + "torch.movedim", + "torch.multinomial", + "torch.multiply", + "torch.mv", + "torch.mvlgamma", + "torch.nan_to_num", + "torch.nansum", + "torch.narrow", + "torch.ne", + "torch.neg", + "torch.negative", + "torch.nextafter", + "torch.nonzero", + "torch.normal", + "torch.not_equal", + "torch.numel", + "torch.ones", + "torch.ones_like", + "torch.orgqr", + "torch.ormqr", + "torch.outer", + "torch.poisson", + "torch.polar", + 
"torch.polygamma", + "torch.pow", + "torch.prod", + "torch.rad2deg", + "torch.rand", + "torch.rand_like", + "torch.randn", + "torch.randn_like", + "torch.randint", + "torch.randint_like", + "torch.randperm", + "torch.range", + "torch.ravel", + "torch.real", + "torch.reciprocal", + "torch.remainder", + "torch.renorm", + "torch.repeat_interleave", + "torch.reshape", + "torch.roll", + "torch.rot90", + "torch.round", + "torch.row_stack", + "torch.rsqrt", + "torch.searchsorted", + "torch.scatter", + "torch.scatter_add", + "torch.sgn", + "torch.sigmoid", + "torch.sign", + "torch.signbit", + "torch.sin", + "torch.sinc", + "torch.sinh", + "torch.slogdet", + "torch.sort", + "torch.split", + "torch.stack", + "torch.squeeze", + "torch.std", + "torch.std_mean", + "torch.sqrt", + "torch.square", + "torch.sub", + "torch.subtract", + "torch.sum", + "torch.svd", + "torch.swapaxes", + "torch.swapdims", + "torch.t", + "torch.tan", + "torch.tanh", + "torch.tensor_split", + "torch.tile", + "torch.topk", + "torch.trace", + "torch.transpose", + "torch.trapz", + "torch.tril", + "torch.tril_indices", + "torch.triu_indices", + "torch.true_divide", + "torch.trunc", + "torch.unbind", + "torch.unique", + "torch.unique_consecutive", + "torch.unsqueeze", + "torch.vander", + "torch.var", + "torch.var_mean", + "torch.view_as_real", + "torch.vstack", + "torch.where", + "torch.xlogy", + "torch.zeros_like" +] \ No newline at end of file diff --git a/mindconverter/mindconverter/ops/torch_optim_list.json b/mindconverter/mindconverter/ops/torch_optim_list.json new file mode 100644 index 0000000000000000000000000000000000000000..64f9c44554b20f2613bbb3ecbb2dfee18f843862 --- /dev/null +++ b/mindconverter/mindconverter/ops/torch_optim_list.json @@ -0,0 +1,27 @@ +[ + "optim.Adadelta", + "optim.Adagrad", + "optim.Adam", + "optim.AdaMax", + "optim.AdamW", + "optim.ASGD", + "optim.Optimizer", + "optim.SparseAdam", + "optim.RMSProp", + "optim.Rprop", + "optim.SGD", + "optim.lr_scheduler.CosineAnnealingLR", + 
"optim.lr_scheduler.ExponentialLR", + "optim.lr_scheduler.MultiStepLR", + "optim.lr_scheduler.StepLR", + "optim.lr_scheduler.LRScheduler", + "optim.lr_scheduler.ConstantLR", + "optim.lr_scheduler.CosineAnnealingWarmRestarts", + "optim.lr_scheduler.CyclicLR", + "optim.lr_scheduler.LambdaLR", + "optim.lr_scheduler.LinearLR", + "optim.lr_scheduler.MultiplicativeLR", + "optim.lr_scheduler.PolynomialLR", + "optim.lr_scheduler.ReduceLROnPlateau", + "optim.lr_scheduler.SequentialLR" +] \ No newline at end of file diff --git a/mindconverter/mindconverter/test/README.md b/mindconverter/mindconverter/test/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b2a4055acd3b6c6accb19004fc49361a24636b5c --- /dev/null +++ b/mindconverter/mindconverter/test/README.md @@ -0,0 +1,46 @@ +# 测试文档 + +`unit.test.py` + +### 算子模块的独立测试: +- 单独测试torch算子,``test_torch_migrate(torch_api, ms_template_api)`` + +- 单独测试torch.nn算子, `test_torch_nn_migrate(torch_nn_api, ms_nn_template_api)` + +- 单独测试torch.Tensor算子,`test_tensor_migrate(tensor_api, ms_tensor_template_api)` + +- 单独测试torch.optim算子,`test_torch_optim_migrate(torch_optim_api, ms_template_api)` + +**测试样例:** + +参数1:需要输入待测试的算子信息,例如:torch.abs(-123) + +参数2:使用mindsporeAPI正确转换的算子信息,例如:ops.abs(-123) + +**目标:** + +测试程序会将torch.abs(-123)进行转换,并比较转换后的算子参数等信息是否与测试员给出的正确转换的算子信息一致 + +**示例:** + +输入:torch.abs(-123), ops.abs(-123) + +输入参数:test_torch_migrate(“torch.abs(-123)”, “ops.abs(-123)”) + +=>Mindconverter[ torch.abs(-123) ] = ops.abs(-123) = ms_template_api + +=>测试通过 + +### 完整模型文件测试: + +**测试样例:** + +`test_torch_mindspore_model(model_file_path, ms_template_file_path)` + +参数1:待转换的torch模型代码 + +参数2:mindsporeAPI标准转换的模型代码 + +**目标:** + +测试程序会将待转换的torch模型代码进行转换,然后与测试员给出的mindsporeAPI的标准转换代码进行比较,通过mindconverter转换后的代码与测试员的标准代码一致,则测试通过。 diff --git a/mindconverter/mindconverter/test/modeltest/lenet.py b/mindconverter/mindconverter/test/modeltest/lenet.py new file mode 100644 index 
import torch.nn as nn
import torch.nn.functional as F


class TestLeNet(nn.Module):
    """TestLeNet network.

    LeNet-style CNN for 3x32x32 inputs producing 10 class logits
    (two conv+max-pool stages followed by three fully connected layers).
    """

    def __init__(self):
        # FIX: the original omitted super().__init__(); without it nn.Module's
        # internal state is never set up and the first submodule assignment
        # below raises AttributeError.
        super(TestLeNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, input_x):
        """Callback method.

        input_x: [batch, 3, 32, 32] -> logits [batch, 10].
        """
        out = F.relu(self.conv1(input_x))   # [b, 6, 28, 28]
        out = F.max_pool2d(out, 2)          # [b, 6, 14, 14]
        out = F.relu(self.conv2(out))       # [b, 16, 10, 10]
        out = F.max_pool2d(out, 2)          # [b, 16, 5, 5]
        out = out.view(out.size(0), -1)     # flatten to [b, 400]
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        out = self.fc3(out)
        return out
ops.relu(input=self.fc1(out)) + out = ops.relu(input=self.fc2(out)) + out = self.fc3(out) + return out \ No newline at end of file diff --git a/mindconverter/mindconverter/test/unit_test.py b/mindconverter/mindconverter/test/unit_test.py new file mode 100644 index 0000000000000000000000000000000000000000..14ca09eccf3025df87782f979c693bc9c2d46357 --- /dev/null +++ b/mindconverter/mindconverter/test/unit_test.py @@ -0,0 +1,187 @@ +import os, sys +sys.path.append(os.getcwd()) +import pytest +import tempfile +from mindconverter.cli import _run +from pathlib import Path + +standared_code_template = '''import torch +import torch.nn as nn +from torch import optim + +class Model(nn.Module): +''' +init_code_template = ''' + def __init__(self): + super(Model, self).__init__() +''' +forward_code_template = ''' + def forward(self, x): +''' + +#Test 1#测试单独的算子 +#输入格式 需要转换的torch算子,目标转换的mindspore的算子 +#测试torch +@pytest.mark.level0 +@pytest.mark.platform_x86_cpu +@pytest.mark.platform_arm_cpu +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +def test_torch_migrate(torch_api, ms_template_api): + model_temp = standared_code_template + init_code_template + forward_code_template + + test_torch = " " + torch_api + model_temp += test_torch + temp_dir_t = tempfile.TemporaryDirectory() + temp_dir_file = temp_dir_t.name + "/temp.py" + temp_file = open(temp_dir_file, "w") + temp_file.write(model_temp) + temp_file.close() + + output_dir_t = tempfile.TemporaryDirectory() + output_dir = output_dir_t.name + rep_dir_t = tempfile.TemporaryDirectory() + rep_dir = rep_dir_t.name + _run(temp_dir_file,output_dir,rep_dir) + output_file = open(output_dir + "/temp.py", 'r') + converter_code = output_file.readlines() + output_file.close() + + assert converter_code[-1].rstrip("\n").replace(" ","") == ms_template_api + +# #测试torch.nn +@pytest.mark.level0 +@pytest.mark.platform_x86_cpu +@pytest.mark.platform_arm_cpu 
+@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +def test_torch_nn_migrate(torch_nn_api, ms_nn_template_api): + model_temp = standared_code_template + init_code_template + + test_torch = " " + torch_nn_api + model_temp += test_torch + temp_dir_t = tempfile.TemporaryDirectory() + temp_dir_file = temp_dir_t.name + "/temp.py" + temp_file = open(temp_dir_file, "w") + temp_file.write(model_temp) + temp_file.close() + + output_dir_t = tempfile.TemporaryDirectory() + output_dir = output_dir_t.name + rep_dir_t = tempfile.TemporaryDirectory() + rep_dir = rep_dir_t.name + _run(temp_dir_file,output_dir,rep_dir) + output_file = open(output_dir + "/temp.py", 'r') + converter_code = output_file.readlines() + output_file.close() + + assert converter_code[-1].rstrip("\n").replace(" ","") == ms_nn_template_api + +# #测试Tensor +@pytest.mark.level0 +@pytest.mark.platform_x86_cpu +@pytest.mark.platform_arm_cpu +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +def test_tensor_migrate(tensor_api, ms_tensor_template_api): + model_temp = standared_code_template + init_code_template + forward_code_template + + test_torch = " " + tensor_api + model_temp += test_torch + temp_dir_t = tempfile.TemporaryDirectory() + temp_dir_file = temp_dir_t.name + "/temp.py" + temp_file = open(temp_dir_file, "w") + temp_file.write(model_temp) + temp_file.close() + + output_dir_t = tempfile.TemporaryDirectory() + output_dir = output_dir_t.name + rep_dir_t = tempfile.TemporaryDirectory() + rep_dir = rep_dir_t.name + _run(temp_dir_file,output_dir,rep_dir) + output_file = open(output_dir + "/temp.py", 'r') + converter_code = output_file.readlines() + output_file.close() + + assert converter_code[-1].rstrip("\n").replace(" ","") == ms_tensor_template_api + +# #测试optim +@pytest.mark.level0 +@pytest.mark.platform_x86_cpu +@pytest.mark.platform_arm_cpu 
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
def test_torch_optim_migrate(torch_optim_api, ms_template_api):
    """Convert one ``torch.optim`` API line and compare it with the expected MindSpore line."""
    model_code = (standared_code_template + init_code_template
                  + forward_code_template + " " + torch_optim_api)
    # Context managers close every handle and remove the temporary directories
    # even when the converter raises; the original leaked both on failure.
    with tempfile.TemporaryDirectory() as src_dir, \
            tempfile.TemporaryDirectory() as output_dir, \
            tempfile.TemporaryDirectory() as rep_dir:
        src_file = src_dir + "/temp.py"
        with open(src_file, "w") as src:
            src.write(model_code)
        _run(src_file, output_dir, rep_dir)
        with open(output_dir + "/temp.py", "r") as converted:
            converter_code = converted.readlines()

    assert converter_code[-1].rstrip("\n").replace(" ", "") == ms_template_api


def _first_code_line(lines):
    """Return the index of the first line that is neither an import nor blank.

    Used to skip the ``import``/``from`` header (which the converter may
    rewrite) before comparing converted output with the reference file.
    """
    for index, line in enumerate(lines):
        if not (line.startswith("import") or line.startswith("from") or line == "\n"):
            return index
    return len(lines)


# Test 2: 测试整个模型文件 — convert a whole torch model file and compare it
# line by line with the reference MindSpore model file.
model_file_path = "./modeltest/"
ms_template_file_path = "./template_model/"


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
# BUG FIX: `parametrize` takes (argnames, argvalues); passing the two paths as
# extra positional arguments raised a TypeError at collection time.
@pytest.mark.parametrize("model_file_path, ms_template_file_path",
                         [(model_file_path, ms_template_file_path)])
def test_torch_mindspore_model(capsys, model_file_path, ms_template_file_path):
    """Convert a full model file and compare it line by line with the template."""
    infile = model_file_path
    modelname = infile.split("/")[-1]
    with tempfile.TemporaryDirectory() as output_dir, \
            tempfile.TemporaryDirectory() as rep_dir:
        _run(infile, output_dir, rep_dir)
        with open(Path(output_dir + "/" + modelname), "r") as converted:
            converter_code = converted.readlines()
    with open(ms_template_file_path, "r") as template:
        ms_code = template.readlines()

    # Skip the import header of each file independently.
    # BUG FIX: the original tested `converter_code[i]` while scanning `ms_code`,
    # so blank lines in the template were never skipped (and the loop could
    # raise IndexError when the template is longer than the converted file).
    pos_converter = _first_code_line(converter_code)
    pos_ms_code = _first_code_line(ms_code)

    assert (len(converter_code) - pos_converter) == (len(ms_code) - pos_ms_code)

    # BUG FIX: compare lines at matching offsets from each file's own first code
    # line; the original compared index i of both files, which misaligns when
    # the two import headers have different lengths.
    for offset in range(len(converter_code) - pos_converter):
        assert (converter_code[pos_converter + offset].rstrip("\n")
                == ms_code[pos_ms_code + offset].rstrip("\n"))
# ============================================================================

# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Constant module."""


from enum import Enum


class MindInsightModules(Enum):
    """
    Enum definition for MindInsight error types.

    Note:
        Each enum value, excluding GENERAL, has an Errors class name starting with the enum value
        in Camel-Case referring to specific module.
    """
    GENERAL = 0
    LINEAGEMGR = 2
    DATAVISUAL = 5
    PROFILERMGR = 6
    SCRIPTCONVERTER = 7
    SYSMETRIC = 8
    WIZARD = 9


class GeneralErrors(Enum):
    """Enum definition for general errors."""
    UNKNOWN_ERROR = 0
    PARAM_TYPE_ERROR = 1
    PARAM_VALUE_ERROR = 2
    PARAM_MISSING_ERROR = 3
    PATH_NOT_EXISTS_ERROR = 4
    FILE_SYSTEM_PERMISSION_ERROR = 8
    PORT_NOT_AVAILABLE_ERROR = 9
    URL_DECODE_ERROR = 10
    COMPUTING_RESOURCE_ERROR = 11


class ProfilerMgrErrors(Enum):
    """Enum definition for profiler errors."""


class LineageMgrErrors(Enum):
    """Enum definition for lineage errors."""


class DataVisualErrors(Enum):
    """Enum definition for datavisual errors."""
    RESTFUL_API_NOT_EXIST = 1
    REQUEST_METHOD_NOT_ALLOWED = 2
    MAX_COUNT_EXCEEDED_ERROR = 3
    CRC_FAILED = 4
    TRAIN_JOB_NOT_EXIST = 5
    SUMMARY_LOG_PATH_INVALID = 6
    SUMMARY_LOG_IS_LOADING = 7
    NODE_NOT_IN_GRAPH_ERROR = 9
    PATH_NOT_DIRECTORY_ERROR = 10
    PLUGIN_NOT_AVAILABLE = 11
    GRAPH_NOT_EXIST = 12
    IMAGE_NOT_EXIST = 13
    SCALAR_NOT_EXIST = 14
    HISTOGRAM_NOT_EXIST = 15
    TRAIN_JOB_DETAIL_NOT_IN_CACHE = 16
    QUERY_STRING_CONTAINS_NULL_BYTE = 17
    TENSOR_NOT_EXIST = 18
    MAX_RESPONSE_DATA_EXCEEDED_ERROR = 19
    STEP_TENSOR_DATA_NOT_IN_CACHE = 20


class ScriptConverterErrors(Enum):
    """Enum definition for mindconverter errors."""


class SysmetricErrors(Enum):
    """Enum definition for sysmetric errors."""
    DSMI_QUERYING_NONZERO = 1


class WizardErrors(Enum):
    """Enum definition for mindwizard errors."""
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Exception module."""

from importlib import import_module
from mindconverter.utils.constant import MindInsightModules, GeneralErrors


class MindInsightException(Exception):
    """
    Base class for MindInsight exception.

    Examples:
        >>> raise MindInsightException(GeneralErrors.PATH_NOT_EXISTS_ERROR, 'path not exists')
        >>> raise MindInsightException(DataVisualErrors.CUSTOMIZED_ERROR, 'datavisual error message')
    """

    RUNTIME = 1
    TYPE = 1
    LEVEL = 0
    SYSID = 42

    def __init__(self, error, message, http_code=500):
        """
        Initialization of MindInsightException.

        Args:
            error (Enum): Error value for specified case.
            message (str): Description for exception.
            http_code (int): Http code for exception. Default is 500.
        """
        if isinstance(message, str):
            # Collapse all runs of whitespace so the message is single-line.
            message = ' '.join(message.split())
        super(MindInsightException, self).__init__(message)
        self.error = error
        self.message = message
        self.http_code = http_code

    def parse_module(self):
        """
        Parse module according to error enum class.

        Note:
            Each enum value, excluding GENERAL, has an Errors class name starting with the enum value
            in Camel-Case referring to specific module.

        Returns:
            Enum, module for specified error.
        """
        module = None
        # BUG FIX: this package is `mindconverter`, not `mindinsight`; importing
        # 'mindinsight.utils.constant' raised ModuleNotFoundError at runtime,
        # so error_code() could never be computed.
        constant = import_module('mindconverter.utils.constant')
        errors_names = [item for item in dir(constant) if item.endswith('Errors')]
        for name in errors_names:
            errors_cls = getattr(constant, name)
            if isinstance(self.error, errors_cls):
                # e.g. "DataVisualErrors" -> "DATAVISUAL" -> MindInsightModules.DATAVISUAL
                key = name[:-len('Errors')].upper()
                module = getattr(MindInsightModules, key, None)
                break

        return module

    @property
    def error_code(self):
        """
        Transform exception no to MindInsight error code.

        code compose(4bytes):
        runtime 2bits, type 2bits, level 3bits, sysid 8bits, modid 5bits, value 12bits.

        num = ((0xFF & runtime) << 30) \
              | ((0xFF & type) << 28) \
              | ((0xFF & level) << 25) \
              | ((0xFF & sysid) << 17) \
              | ((0xFF & modid) << 12) \
              | (0x0FFF & value)

        Returns:
            str, Hex string representing the composed MindInsight error code.
        """
        module = self.parse_module()
        if not module:
            raise UnknownError('Unknown module for {}.'.format(self.error))

        num = (((0xFF & self.RUNTIME) << 30)
               | ((0xFF & self.TYPE) << 28)
               | ((0xFF & self.LEVEL) << 25)
               | ((0xFF & self.SYSID) << 17)
               | ((0xFF & module.value) << 12)
               | (0x0FFF & self.error.value))

        return hex(num)[2:].zfill(8).upper()

    def __str__(self):
        return '[{}] code: {}, msg: {}'.format(self.__class__.__name__, self.error_code, self.message)


class ParamValueError(MindInsightException):
    """Request param value error."""
    def __init__(self, error_detail):
        error_msg = 'Invalid parameter value. {}'.format(error_detail)
        super(ParamValueError, self).__init__(
            GeneralErrors.PARAM_VALUE_ERROR,
            error_msg,
            http_code=400)


class ParamTypeError(MindInsightException):
    """Request param type error."""
    def __init__(self, param_name, expected_type):
        error_msg = "Invalid parameter type. '{}' expect {} type.".format(param_name, expected_type)
        super(ParamTypeError, self).__init__(
            GeneralErrors.PARAM_TYPE_ERROR,
            error_msg,
            http_code=400)


class ParamMissError(MindInsightException):
    """Missing param error."""
    def __init__(self, param_name):
        error_msg = "Param missing. '{}' is required.".format(param_name)
        super(ParamMissError, self).__init__(
            GeneralErrors.PARAM_MISSING_ERROR,
            error_msg,
            http_code=400)


class PathNotExistError(MindInsightException):
    """Raised when specified path do not exist."""
    def __init__(self, error_detail):
        """Initialize PathNotExistError."""
        error_msg = 'Specified path does not exist. Detail: {}'.format(error_detail)
        super(PathNotExistError, self).__init__(
            GeneralErrors.PATH_NOT_EXISTS_ERROR,
            error_msg,
            http_code=400)


class FileSystemPermissionError(MindInsightException):
    """Can not access file or dir."""
    def __init__(self, error_detail):
        error_msg = 'File or dir access failed. Detail: {}'.format(error_detail)
        super(FileSystemPermissionError, self).__init__(
            GeneralErrors.FILE_SYSTEM_PERMISSION_ERROR,
            error_msg,
            http_code=400)


class PortNotAvailableError(MindInsightException):
    """Port not available error."""
    def __init__(self, error_detail):
        error_msg = 'Port not available error. Detail: {}'.format(error_detail)
        super(PortNotAvailableError, self).__init__(
            GeneralErrors.PORT_NOT_AVAILABLE_ERROR,
            error_msg,
            http_code=400)


class UnknownError(MindInsightException):
    """Unknown error."""
    def __init__(self, error_msg):
        super(UnknownError, self).__init__(
            GeneralErrors.UNKNOWN_ERROR,
            error_msg,
            http_code=500)


class UrlDecodeError(MindInsightException):
    """Url decoding failed."""
    def __init__(self, error_detail):
        error_msg = f"Url decode failed. Detail: {error_detail}"
        super(UrlDecodeError, self).__init__(GeneralErrors.URL_DECODE_ERROR,
                                             error_msg,
                                             http_code=400)
# ============================================================================
"""Log module."""

import sys
import os
import stat
import time
import fcntl
import logging
from logging.handlers import RotatingFileHandler

from mindconverter.conf import settings
from mindconverter.utils.exceptions import MindInsightException
from mindconverter.utils.constant import GeneralErrors


class MultiCompatibleRotatingFileHandler(RotatingFileHandler):
    """Inherit RotatingFileHandler for multiprocess compatibility."""

    def rolling_rename(self):
        """Rolling rename log files: shift <base>.i to <base>.i+1, oldest first."""
        for i in range(self.backupCount - 1, 0, -1):
            sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
            dfn = self.rotation_filename("%s.%d" % (self.baseFilename, i + 1))
            if os.path.exists(sfn):
                if os.path.exists(dfn):
                    os.remove(dfn)
                # Rotated backups are made read-only.
                os.chmod(sfn, stat.S_IREAD)
                os.rename(sfn, dfn)

    def doRollover(self):
        """Do a rollover, as described in __init__()."""
        if self.stream:
            self.stream.close()
            self.stream = None

        # Attain an exclusive lock with blocking mode by `fcntl` module, so that
        # only one process rotates the shared log file at a time.
        with open(self.baseFilename, 'a') as file_pointer:
            fcntl.lockf(file_pointer.fileno(), fcntl.LOCK_EX)

            if self.backupCount > 0:
                self.rolling_rename()

            dfn = self.rotation_filename(self.baseFilename + ".1")
            if os.path.exists(dfn):
                os.remove(dfn)

            os.chmod(self.baseFilename, stat.S_IREAD)
            self.rotate(self.baseFilename, dfn)

            with open(self.baseFilename, 'a'):
                os.chmod(self.baseFilename, stat.S_IREAD | stat.S_IWRITE)

        if not self.delay:
            self.stream = self._open()

    def _open(self):
        """Open the current base file with the (original) mode and encoding."""
        new_log = open(self.baseFilename, self.mode, encoding=self.encoding)
        os.chmod(self.baseFilename, stat.S_IREAD | stat.S_IWRITE)
        return new_log


class MindInsightFormatter(logging.Formatter):
    """
    MindInsight formatter.

    Prefixes every record with the (upper-cased) sub module name and a
    microsecond-resolution timestamp, and collapses the message onto one line.
    """

    def __init__(self, sub_module, fmt=None, **kwargs):
        """
        Initialization of SlogFormatter.

        Args:
            sub_module (str): Sub module name, type is string.
            fmt (str): Specified format pattern, type is string.

        Returns:
            Formatter, instance of SlogFormatter.
        """
        super(MindInsightFormatter, self).__init__(fmt=fmt, **kwargs)
        self.sub_module = sub_module.upper()

    def formatTime(self, record, datefmt=None):
        """
        Overwrite for uniform format %Y-%m-%d-%H:%M:%S.SSS.SSS

        Args:
            record (LogRecord): Log record.
            datefmt (str): Date format, type is string.

        Returns:
            str, formatted timestamp, type is string.
        """
        created_time = self.converter(record.created)
        if datefmt:
            return time.strftime(datefmt, created_time)

        timestamp = time.strftime('%Y-%m-%d-%H:%M:%S', created_time)
        # record.msecs is milliseconds; *1000 yields microseconds, rendered as
        # two 3-digit groups: .SSS(ms).SSS(us).
        msecs = str(round(record.msecs * 1000)).zfill(6)
        return '{}.{}.{}'.format(timestamp, msecs[:3], msecs[3:])

    def formatMessage(self, record):
        """Escape the message before format (collapse whitespace to one line)."""
        record.message = ' '.join(record.message.split())
        return super().formatMessage(record)

    def format(self, record):
        """
        Apply log format with specified pattern.

        Args:
            record (str): Format pattern, type is string.

        Returns:
            str, formatted log content according to format pattern, type if string.
        """
        # BUG FIX: this package is `mindconverter`, not `mindinsight`;
        # rfind('mindinsight') returned -1, truncating filepath to one character.
        record.filepath = record.pathname[__file__.rfind('mindconverter'):]
        record.sub_module = self.sub_module
        return super().format(record)


def get_logger(sub_module, log_name):
    """
    Get logger by name and sub module.

    Args:
        sub_module (str): Sub module name, type is string.
        log_name (str): Log file name, type is string.

    Returns:
        Logger, logger instance named by sub_module and log_name.
    """
    return logging.getLogger(name='{}.{}'.format(sub_module, log_name))


def setup_logger(sub_module, log_name, **kwargs):
    """
    Setup logger with sub module name and log file name.

    Args:
        sub_module (str): Sub module name, also for sub directory under logroot.
        log_name (str): Log name, also for log filename.
        console (bool): Whether to output log to stdout. Default: False.
        logfile (bool): Whether to output log to disk. Default: True.
        level (Enum): Log level. Default: INFO.
        formatter (str): Log format.
        propagate (bool): Whether to enable propagate feature. Default: False.
        maxBytes (int): Rotating max bytes. Default: 50M.
        backupCount (int): Rotating backup count. Default: 30.

    Returns:
        Logger, well-configured logger instance.

    Examples:
        >>> from mindconverter.utils.log import setup_logger
        >>> logger = setup_logger('datavisual', 'flask.request', level=logging.DEBUG)

        >>> from mindconverter.utils.log import get_logger
        >>> logger = get_logger('datavisual', 'flask.request')

        >>> import logging
        >>> logger = logging.getLogger('datavisual.flask.request')
    """
    logger = get_logger(sub_module, log_name)
    # Already configured by an earlier call: return it unchanged.
    if logger.hasHandlers():
        return logger

    level = kwargs.get('level', settings.LOG_LEVEL)
    formatter = kwargs.get('formatter', None)
    propagate = kwargs.get('propagate', False)

    logger.setLevel(level)
    logger.propagate = propagate

    if not formatter:
        formatter = settings.LOG_FORMAT

    if kwargs.get('console', False):
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.formatter = MindInsightFormatter(sub_module, formatter)
        logger.addHandler(console_handler)

    if kwargs.get('logfile', True):
        max_bytes = kwargs.get('maxBytes', settings.LOG_ROTATING_MAXBYTES)

        if not isinstance(max_bytes, int) or not max_bytes > 0:
            raise MindInsightException(GeneralErrors.PARAM_VALUE_ERROR,
                                       'maxBytes should be int type and > 0.')

        backup_count = kwargs.get('backupCount',
                                  settings.LOG_ROTATING_BACKUPCOUNT)

        if not isinstance(backup_count, int) or not backup_count > 0:
            raise MindInsightException(GeneralErrors.PARAM_VALUE_ERROR,
                                       'backupCount should be int type and > 0.')

        logfile_dir = os.path.join(settings.WORKSPACE, 'log', sub_module)

        # rwx for owner only (0o700).
        permissions = os.R_OK | os.W_OK | os.X_OK
        mode = permissions << 6
        os.makedirs(logfile_dir, mode=mode, exist_ok=True)

        logfile_handler = MultiCompatibleRotatingFileHandler(
            filename=os.path.join(logfile_dir, '{}.log'.format(log_name)),
            maxBytes=max_bytes,
            backupCount=backup_count,
            encoding='utf8'
        )
        logfile_handler.formatter = MindInsightFormatter(sub_module, formatter)
        logger.addHandler(logfile_handler)

    return logger


utils_logger = setup_logger("utils", "utils")
https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/argmin.html", + "torch.bernoulli":"MindSpore: 参数 p 里保存了伯努利分布的概率值,默认值为0.5。 p 的shape需要和 input 的shape一致,返回值的shape和 input 的shape一致。 https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/bernoulli.html", + "torch.bucketize":"MindSpore此API功能与PyTorch一致,参数支持的数据类型有差异。 https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/bucketize.html", + "torch.broadcast_to":"MindSpore此API实现功能与PyTorch基本一致,额外支持shape中存在-1维度的情况。如果目标shape中有-1维度,它被该维度中的输入shape的值替换。如果目标shape中有-1维度,则-1维度不能位于一个不存在的维度中。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/broadcast_to.html", + "torch.cat":"MindSpore中tensors序列中的各Tensor精度必须保持一致,PyTorch中tensors序列中的各Tensor的精度可以不同 https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/cat.html", + "torch.cdist":"MindSpore此API功能与PyTorch基本一致,MindSpore无法指定是否使用矩阵乘的方式计算向量对之间的欧几里得距离。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/cdist.html", + "torch.cos":"MindSpore此API功能与PyTorch一致,参数支持的数据类型有差异。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/cos.html", + "torch.deg2rad":"MindSpore此API功能与PyTorch一致,参数支持的数据类型有差异。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/deg2rad.html", + "torch.diag":"MindSpore此API功能与PyTorch不一致。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/diag.html", + "torch.dot":"MindSpore此API功能与PyTorch不一致。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/tensor_dot.html", + "torch.erfinv":"MindSpore此API功能与PyTorch一致,参数支持的数据类型有差异。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/erfinv.html", + "torch.flatten":"通过 order 为”C”或”F”确定优先按行还是列展平。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/flatten.html", + "torch.float_power":"如果两个输入都是实数,MindSpore此API实现功能与PyTorch一致,仅参数名不同。目前不支持复数运算。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/float_power.html", + 
"torch.floor":"MindSpore此API功能与PyTorch一致,参数支持的数据类型有差异。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/floor.html", + "torch.ger":"MindSpore此API功能与PyTorch不一致。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/ger.html", + "torch.isclose":"MindSpore此API功能与PyTorch一致,参数支持的数据类型有差异。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/isclose.html", + "torch.linspace":"输出Tensor的dtype与参数 start 相同。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/linspace.html", + "torch.log":"MindSpore此API功能与PyTorch一致,参数支持的数据类型有差异。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/log.html", + "torch.log2":"MindSpore此API功能与PyTorch一致,参数支持的数据类型有差异。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/log2.html", + "torch.log10":"MindSpore此API功能与PyTorch一致,参数支持的数据类型有差异。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/log10.html", + "torch.max":"axis为None或者shape为空时,keepdims以及后面的参数均不生效,功能与torch.max(input)一致,此时索引固定返回0;否则,输出为元组(最大值, 最大值的索引),功能与torch.max(input, dim, keepdim=False, *, out=None)一致。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/max.html", + "torch.median":"根据指定 axis,输出 input 的中值与索引。keepdims 功能和PyTorch一致。与Pytorch不同,不论输入包含不包含 axis,MindSpore返回指定维度上的中值与索引。MindSpore没有 out 参数。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/median.html", + "torch.meshgrid":"MindSpore此API实现功能与PyTorch一致。inputs参数只支持Tensor,不支持scalar。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/meshgrid.html", + "torch.min":"axis为None或者shape为空时,keepdims以及后面的参数均不生效,功能与torch.min(input)一致,此时索引固定返回0;否则,输出为元组(最大值, 最大值的索引),功能与torch.min(input, dim, keepdim=False, *, out=None)一致。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/min.html", + "torch.multinomial":"MindSpore: 参数 replacement 的默认值为 True ,即每次采样后把采样的数据放回。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/multinomial.html", + 
"torch.mvlgamma":"MindSpore此API功能与PyTorch一致,参数支持的数据类型有差异。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/mvlgamma.html", + "torch.normal":"MindSpore: mean 和 std 支持的数据类型是Tensor,返回值的shape由 shape , mean , stddev 三者广播得到。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/normal.html", + "torch.ones":"MindSpore:MindSpore此API实现功能与PyTorch一致,仅参数名不同。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/ones.html", + "torch.poisson":"shape 决定了每个分布下采样的随机数张量的形状,返回值的shape是 mindspore.concat([shape, mindspore.shape(rate)], axis=0) 。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/poisson.html", + "torch.polygamma":"MindSpore此API功能与PyTorch一致,参数支持的数据类型有差异。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/polygamma.html", + "torch.prod":"根据指定 axis,对 input 中元素求乘积。keep_dims 功能和PyTorch一致。MindSpore没有 dtype 参数。MindSpore的 axis 有默认值,在 axis 是默认值情况下,对 input 所有元素求乘积。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/prod.html", + "torch.rad2deg":"MindSpore此API功能与PyTorch一致,参数支持的数据类型有差异。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/rad2deg.html", + "torch.randint":"MindSpore:low 为必选输入,无默认值。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/randint.html", + "torch.randint_like":"MindSpore:low 为必选输入,无默认值。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/randint_like.html", + "torch.range":"MindSpore: 输出Tensor的dtype与输入Tensor相同。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/range.html", + "torch.renorm":"MindSpore:参数 p 的数据类型是 int 。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/renorm.html", + "torch.scatter":"MindSpore: index 的shape必须和 src 的shape一致,即 src 的所有数据都会被 index 分散到 input 里。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/scatter.html", + "torch.scatter_add":"MindSpore: indices 的shape必须和 updates 的shape一致,即 updates 的所有数据都会被 indices 分散到 
input_x 里。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/scatter_add.html", + "torch.std":"MindSpore:输出Tensor各维度上的标准差,也可以按照 axis 对指定维度求标准差。如果 ddof 是布尔值,和 unbiased 作用相同; ddof 为整数,计算中使用的除数是 N−ddof,其中N表示元素的数量。keepdim 控制输出和输入的维度是否相同。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/std.html", + "torch.std_mean":"MindSpore:输出Tensor各维度上的标准差和均值,也可以按照 axis 对指定维度求标准差和均值。如果 ddof 是布尔值,和 unbiased 作用相同; ddof 为整数,计算中使用的除数是 N−ddof,其中N表示元素的数量。keepdim 控制输出和输入的维度是否相同。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/std_mean.html", + "torch.svd":"MindSpore此API功能与PyTorch不一致。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/svd.html", + "torch.unique":"MindSpore此API功能与PyTorch不一致。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/unique.html", + "torch.var":"MindSpore:输出Tensor各维度上的方差,也可以按照 axis 对指定维度求方差。如果 ddof 是布尔值,和 unbiased 作用相同; ddof 为整数,计算中使用的除数是 N−ddof,其中N表示元素的数量。keepdim 控制输出和输入的维度是否相同。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/var.html", + "torch.var_mean":"MindSpore:输出Tensor各维度上的方差和均值,也可以按照 axis 对指定维度求方差和均值。如果 ddof 是布尔值,和 unbiased 作用相同; ddof 为整数,计算中使用的除数是 N−ddof,其中N表示元素的数量。keepdim 控制输出和输入的维度是否相同。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/var_mean.html", + "torch.zeros":"MindSpore:生成shape为 size 的填充值为0的Tensor。https://www.mindspore.cn/docs/zh-CN/r2.2/note/api_mapping/pytorch_diff/zeros.html", + "torch.nn.AdaptiveAvgPool1d": "MindSpore:MindSpore此API目前只支持3D数据,要求输入数据的最后一个维度长度要大于输出大小,并且必须整除output_size。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/AdaptiveAvgPool1d.html", + "torch.nn.AdaptiveMaxPool1d": "MindSpore:MindSpore此API目前只支持3D数据,要求输入数据的最后一个维度长度要大于输出大小,并且必须整除output_size;目前不支持返回最大值的索引下标。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/AdaptiveMaxPool1d.html", + "torch.nn.AvgPool1d": "MindSpore:MindSpore此API实现功能同时兼容TensorFlow和PyTorch,pad_mode 为 “valid” 或者 “same” 
时,功能与TensorFlow一致,pad_mode 为 “pad” 时,功能与PyTorch一致,MindSpore相比PyTorch1.8.1额外支持了维度为2的输入,与PyTorch1.12一致。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/AvgPool1d.html", + "torch.nn.AvgPool2d": "MindSpore:MindSpore此API实现功能同时兼容TensorFlow和PyTorch,pad_mode 为 “valid” 或者 “same” 时,功能与TensorFlow一致,pad_mode 为 “pad” 时,功能与PyTorch一致,MindSpore相比PyTorch1.8.1额外支持了维度为3的输入,与PyTorch1.12一致。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/AvgPool2d.html", + "torch.nn.AvgPool3d": "MindSpore:MindSpore此API实现功能同时兼容TensorFlow和PyTorch,pad_mode 为 “valid” 或者 “same” 时,功能与TensorFlow一致,pad_mode 为 “pad” 时,功能与PyTorch一致,MindSpore相比PyTorch1.8.1额外支持了维度为4的输入,与PyTorch1.12一致。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/AvgPool3d.html", + "torch.nn.BCEWithLogitsLoss": "MindSpore:MindSpore此API实现功能与PyTorch一致,仅输入参数名不同。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/BCEWithLogitsLoss.html", + "torch.nn.BatchNorm1d": "MindSpore:MindSpore此API实现功能与PyTorch基本一致。MindSpore中momentum参数默认值为0.9,与PyTorch的momentum转换关系为1-momentum,默认值行为与PyTorch相同;训练以及推理时的参数更新策略和PyTorch有所不同。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/BatchNorm1d.html", + "torch.nn.BatchNorm2d": "MindSpore:此API实现功能与PyTorch基本一致,典型区别有两点。MindSpore中momentum参数默认值为0.9,与PyTorch的momentum转换关系为1-momentum,默认值行为与PyTorch相同;训练以及推理时的参数更新策略和PyTorch有所不同。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/BatchNorm2d.html", + "torch.nn.BatchNorm3d": "MindSpore:此API实现功能与PyTorch基本一致,典型区别有两点。MindSpore中momentum参数默认值为0.9,与PyTorch的momentum转换关系为1-momentum,默认值行为与PyTorch相同;训练以及推理时的参数更新策略和PyTorch有所不同。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/BatchNorm3d.html", + "torch.nn.Bilinear": "MindSpore:MindSpore此API实现功能与PyTorch基本一致。可以通过 weight_init 和 bias_init 分别设置权重和偏置的初始化方法,PyTorch无此功能。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/BiDense.html", + "torch.nn.Conv1d": 
"MindSpore:与PyTorch实现的功能基本一致,但存在偏置差异和填充差异。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/Conv1d.html", + "torch.nn.Conv2d": "MindSpore:与PyTorch实现的功能基本一致,但存在偏置差异和填充差异。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/Conv2d.html", + "torch.nn.Conv3d": "MindSpore:与PyTorch实现的功能基本一致,但存在偏置差异和填充差异。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/Conv3d.html", + "torch.nn.ConvTranspose1d": "MindSpore:MindSpore此API实现功能与PyTorch基本一致,新增了填充模式参数”pad_mode”,当”pad_mode” = “pad”时与PyTorch默认方式相同,利用weight_init 和bias_init 参数可以配置初始化方式。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/Conv1dTranspose.html", + "torch.nn.ConvTranspose2d": "MindSpore:MindSpore此API实现功能与PyTorch基本一致,新增了填充模式参数”pad_mode”,当”pad_mode” = “pad”时与PyTorch默认方式相同,利用weight_init和bias_init参数可以配置初始化方式。此外,torch.nn.ConvTranspose2d有一个output_padding参数,其功能是指对反卷积后的特征图进行单侧补零(右侧和下侧),而mindspore.nn.Conv2dTranspose中目前没有该参数,可以对输出结果使用nn.Pad进行补维来代替。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/Conv2dTranspose.html", + "torch.nn.ConvTranspose3d": "MindSpore:MindSpore此API实现功能与PyTorch基本一致,新增了填充模式参数”pad_mode”,当”pad_mode” = “pad”时与PyTorch默认方式相同,利用weight_init 和bias_init 参数可以配置初始化方式。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/Conv3dTranspose.html", + "torch.nn.CrossEntropyLoss": "MindSpore:MindSpore此API实现功能与PyTorch基本一致,而且目标值支持两种不同的数据形式:类别索引和类别概率。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/CrossEntropyLoss.html", + "torch.nn.Dropout": "MindSpore:MindSpore此API实现功能与PyTorch基本一致。keep_prob 是输入神经元保留率,现已废弃。dtype 设置输出Tensor的数据类型,现已废弃。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/Dropout.html", + "torch.nn.GaussianNLLLoss": "MindSpore:与PyTorch实现同样的功能。如果var中存在小于0的数字,PyTorch会直接报错,而MindSpore则会计算max(var, eps) https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/GaussianNLLLoss.html", + "torch.nn.GELU": 
"MindSpore:MindSpore此API实现功能与PyTorch基本一致。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/GELU.html", + "torch.nn.GRU": "MindSpore:功能一致,多一个接口输入seq_length,表示输入batch中每个序列的长度。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/GRU.html", + "torch.nn.GRUCell": "MindSpore:MindSpore此API实现功能与PyTorch基本一致。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/GRUCell.html", + "torch.nn.GroupNorm": "MindSpore:MindSpore此API实现功能与PyTorch基本一致,MindSpore还可以对需要学习的放射参数进行额外的初始化。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/GroupNorm.html", + "torch.nn.Hardshrink": "MindSpore:MindSpore此API实现功能与PyTorch一致,仅参数名不同。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/HShrink.html", + "torch.nn.InstanceNorm1d": "MindSpore:此API实现功能与PyTorch基本一致,但目前只能对三维数据进行归一化,典型区别有两点。MindSpore中affine参数默认值为True,会对内部参数 γ 和 β 进行学习,PyTorch默认值为False,不进行参数学习;PyTorch支持track_running_stats参数,如果设置为True,会在推理中使用训练得到的均值和方差,默认值为False,MindSpore无此参数,在训练和推理中都会使用输入数据的计算均值和方差,与PyTorch的默认值行为相同。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/InstanceNorm1d.html", + "torch.nn.InstanceNorm2d": "MindSpore:此API实现功能与PyTorch基本一致,典型区别有两点。MindSpore中affine参数默认值为True,会对内部参数 γ 和 β 进行学习,PyTorch默认值为False,不进行参数学习;PyTorch支持track_running_stats参数,如果设置为True,会在推理中使用训练得到的均值和方差,默认值为False,MindSpore无此参数,在训练和推理中都会使用输入数据的计算均值和方差,与PyTorch的默认值行为相同。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/InstanceNorm2d.html", + "torch.nn.InstanceNorm3d": "MindSpore:此API实现功能与PyTorch基本一致,典型区别有两点。MindSpore中affine参数默认值为True,会对内部参数 γ 和 β 进行学习,PyTorch默认值为False,不进行参数学习;PyTorch支持track_running_stats参数,如果设置为True,会在推理中使用训练得到的均值和方差,默认值为False,MindSpore无此参数,在训练和推理中都会使用输入数据的计算均值和方差,与PyTorch的默认值行为相同。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/InstanceNorm3d.html", + "torch.nn.LayerNorm": 
"MindSpore:MindSpore此API实现功能与PyTorch基本一致,但MindSpore中不存在参数elementwise_affine,同时增加了参数begin_norm_axis控制归一化开始计算的轴,参数begin_params_axis控制第一个参数(beta, gamma)的维度,以及参数gamma_init和beta_init用来控制γ参数和β参数的初始化方法。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/LayerNorm.html", + "torch.nn.LeakyReLU": "MindSpore:MindSpore此API实现功能与PyTorch基本一致,其中参数alpha与PyTorch中的参数negative_slope功能一致,参数名不同,默认值不同;但MindSpore不存在inplace参数。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/LeakyReLU.html", + "torch.nn.LSTM": "MindSpore:若不指定PyTorch中的proj_size参数,MindSpore此API实现的功能与PyTorch一致,仅部分参数名不同。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/LSTM.html", + "torch.nn.LSTMCell": "MindSpore:MindSpore此API实现功能与PyTorch基本一致,返回值在形式上有差异。PyTorch中返回h_1和 c_1,MindSpore中返回hx’,是两个Tensor组成的的元组(h’, c’)。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/LSTMCell.html", + "torch.nn.Linear": "MindSpore:MindSpore此API实现功能与PyTorch基本一致,而且可以在全连接层后添加激活函数。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/Dense.html", + "torch.nn.LocalResponseNorm": "MindSpore:MindSpore此API实现功能与PyTorch一致。MindSpore的 depth_radius 参数与PyTorch的 size 实现同样的功能,但存在一个二倍映射关系:size=2*depth_radius。目前mindspore.nn.LRN与tf.raw_ops.LRN能完全对标,两者能达到相同的精度;如果与torch.nn.LocalResponseNorm相比,会存在1e-3的精度差异。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/LRN.html", + "torch.nn.MaxPool1d": "MindSpore:MindSpore此API实现功能同时兼容TensorFlow和PyTorch,pad_mode 为 “valid” 或者 “same” 时,功能与TensorFlow一致,pad_mode 为 “pad” 时,功能与PyTorch一致,MindSpore相比PyTorch1.8.1额外支持了维度为2的输入,与PyTorch1.12一致。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/MaxPool1d.html", + "torch.nn.MaxPool2d": "MindSpore:MindSpore此API实现功能同时兼容TensorFlow和PyTorch,pad_mode 为 “valid” 或者 “same” 时,功能与TensorFlow一致,pad_mode 为 “pad” 时,功能与PyTorch一致,MindSpore相比PyTorch1.8.1额外支持了维度为2的输入,与PyTorch1.12一致。 
https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/MaxPool2d.html", + "torch.nn.MaxPool3d": "MindSpore:MindSpore此API实现功能同时兼容TensorFlow和PyTorch,pad_mode 为 “valid” 或者 “same” 时,功能与TensorFlow一致,pad_mode 为 “pad” 时,功能与PyTorch一致,MindSpore相比PyTorch1.8.1额外支持了维度为2的输入,与PyTorch1.12一致。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/MaxPool3d.html", + "torch.nn.ModuleDict": "MindSpore:MindSpore此API实现功能与PyTorch基本一致。CellDict支持的Cell的类型与ModuleDict有两点不一致, https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/CellDict.html", + "torch.nn.MultiheadAttention": "该算子MindSpore与Pytorch存在差异,详见 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/MultiheadAttention.html", + "torch.nn.NLLLoss": "MindSpore:除两个在PyTorch已弃用的参数不同外,功能上无差异。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/NLLLoss.html", + "torch.nn.PReLU": "MindSpore:MindSpore此算子功能与PyTorch一致,仅参数名不同。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/PReLU.html", + "torch.nn.ReLU": "MindSpore:MindSpore此算子实现功能与PyTorch一致,仅参数设置不同。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/ReLU.html", + "torch.nn.RNN": "MindSpore:实现与PyTorch一致的功能。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/RNN.html", + "torch.nn.RNNCell": "MindSpore:MindSpore此API实现功能与PyTorch基本一致。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/RNNCell.html", + "torch.nn.Sequential": "MindSpore:构造Cell顺序容器。入参类型和PyTorch一致。和PyTorch相比,MindSpore支持append(),在容器末尾添加Cell。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/SequentialCell.html", + "torch.nn.Sigmoid": "MindSpore:MindSpore此API实现功能与PyTorch一致,仅实例化后输入的参数名不同。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/Sigmoid.html", + "torch.nn.SmoothL1Loss": "MindSpore:除两个在PyTorch已弃用的参数不同外,功能上无差异。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/SmoothL1Loss.html", + 
"torch.nn.Softmax": "MindSpore:MindSpore此API实现功能与PyTorch一致,仅参数名不同。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/nn_Softmax.html", + "torch.nn.Softmin": "MindSpore:支持使用 axis参数实例化,将指定维度元素缩放到[0, 1]之间并且总和为1,默认值:-1。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/softmin.html", + "torch.nn.Softshrink": "MindSpore:接口名称与PyTorch有差异,MindSpore为SoftShrink,PyTorch为Softshrink,功能一致。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/SoftShrink.html", + "torch.nn.SyncBatchNorm": "MindSpore:MindSpore此API实现功能与PyTorch基本一致。MindSpore输入仅支持二维和四维。MindSpore中momentum参数默认值为0.9,与PyTorch的momentum转换关系为1-momentum,默认值行为与PyTorch相同;训练以及推理时的参数更新策略和PyTorch有所不同。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/SyncBatchNorm.html", + "torch.nn.Tanh": "MindSpore:MindSpore此API实现功能与PyTorch一致。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/Tanh.html", + "torch.nn.Transformer": "该算子MindSpore与Pytorch存在差异,详见 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/Transformer.html", + "torch.nn.TransformerDecoder": "该算子MindSpore与Pytorch存在差异,详见 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/TransformerDecoder.html", + "torch.nn.TransformerEncoder": "该算子MindSpore与Pytorch存在差异,详见 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/TransformerEncoder.html", + "torch.nn.TransformerDecoderLayer": "该算子MindSpore与Pytorch存在差异,详见 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/TransformerDecoderLayer.html", + "torch.nn.TransformerEncoderLayer": "该算子MindSpore与Pytorch存在差异,详见 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/TransformerEncoderLayer.html", + "torch.nn.Unfold": "MindSpore:MindSpore此API实现功能与PyTorch功能有差异。PyTorch的kernel_size、stride和dilation支持int和tuple输入,padding支持在输入的两侧添加的隐式零填充。而MindSpore的ksizes、strides和rates三个参数的格式必须是(1, row, col, 
1),padding参数支持两种格式same和valid。MindSpore输入是四维张量,shape为(in_batch, in_depth, in_row, int_col),输出是shape为(out_batch, out_depth, out_row, out_col)的四维Tensor,其中out_batch和in_batch相同。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/Unfold.html", + "torch.nn.Upsample": "MindSpore:和PyTorch实现功能基本一致,但是对于一些参数支持不完善,例如一些模式不能直接传入 scale_factor ,但可以通过设置 recompute_scale_factor 参数为True进行规避(当 scale_factor 为浮点数时,可能产生精度误差),具体差异如下。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/interpolate.html", + "torch.nn.functional.adaptive_avg_pool1d": "MindSpore:MindSpore此API目前只支持3D数据,要求输入数据的最后一个维度长度要大于输出大小,并且必须整除output_size。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/AdaptiveAvgPool1d.html", + "torch.nn.functional.adaptive_max_pool1d": "MindSpore:MindSpore此API目前只支持3D数据,要求输入数据的最后一个维度长度要大于输出大小,并且必须整除output_size;目前不支持返回最大值的索引下标。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/AdaptiveMaxPool1d.html", + "torch.nn.functional.avg_pool1d": "MindSpore:MindSpore此API功能与pytorch基本一致,部分输入默认值不同。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/avg_pool1d.html", + "torch.nn.functional.avg_pool2d": "MindSpore:MindSpore此API功能与pytorch基本一致,部分输入默认值不同。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/avg_pool1d.html", + "torch.nn.functional.avg_pool3d": "MindSpore:MindSpore此API功能与pytorch基本一致,部分输入默认值不同。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/avg_pool1d.html", + "torch.nn.functional.dropout": "该算子MindSpore与Pytorch存在差异,详见 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/drop_out.html", + "torch.nn.functional.elu": "MindSpore:MindSpore此API实现功能与PyTorch基本一致,不过α目前只支持1.0。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/elu.html", + "torch.nn.functional.fold": "MindSpore:MindSpore此API实现功能与PyTorch基本一致。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/mindspore.ops.fold.html", 
+ "torch.nn.functional.gelu": "MindSpore:MindSpore此API实现功能与PyTorch基本一致。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/mindspore.ops.gelu.html", + "torch.nn.functional.grid_sample": "MindSpore:MindSpore此API实现功能与PyTorch基本一致,不过暂不支持mode为“bicubic”。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/grid_sample.html", + "torch.nn.functional.interpolate": "MindSpore:和PyTorch实现功能基本一致,但是对于一些参数支持不完善,例如一些模式不能直接传入 scale_factor ,但可以通过设置 recompute_scale_factor 参数为True进行规避(当 scale_factor 为浮点数时,可能产生精度误差),具体差异如下。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/interpolate.html", + "torch.nn.functional.kl_div": "MindSpore:MindSpore此API实现功能与PyTorch一致,但未设置 log_target 参数。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/kl_div.html", + "torch.nn.functional.leaky_relu": "MindSpore:MindSpore此API实现功能与PyTorch基本一致。不同的是,MindSpore中 alpha 的初始值是0.2,PyTorch中对应的 negative_slope 初始值是0.01。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/leaky_relu.html", + "torch.nn.functional.log_softmax": "MindSpore:支持使用axis参数和logits输入实现函数,对softmax的结果取对数。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/log_softmax.html", + "torch.nn.functional.pad": "MindSpore:MindSpore的padding参数与PyTorch的pad参数功能用法完全一致,另外MindSpore相比PyTorch额外支持了Tensor类型的入参形式。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/pad.html", + "torch.nn.functional.softmax": "MindSpore:支持使用 axis参数和x输入实现函数,将指定维度元素缩放到[0, 1]之间并且总和为1。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/softmax.html", + "torch.nn.functional.softmin": "MindSpore:支持使用 axis参数实例化,将指定维度元素缩放到[0, 1]之间并且总和为1,默认值:-1。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/softmin.html", + "torch.nn.functional.upsample": "MindSpore:和PyTorch实现功能基本一致,但是对于一些参数支持不完善,例如一些模式不能直接传入 scale_factor ,但可以通过设置 recompute_scale_factor 参数为True进行规避(当 scale_factor 为浮点数时,可能产生精度误差),具体差异如下。 
https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/interpolate.html", + ".all": "MindSpore:根据指定 axis,对 x 的元素进行逻辑与。keep_dims 功能和PyTorch一致。MindSpore没有 out 参数。MindSpore的 axis 有默认值,在 axis 是默认值情况下,对 x 所有元素进行逻辑与。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/all.html", + ".amax": "MindSpore:根据指定 axis,求 x 的最大值元素。keepdims 功能和PyTorch一致。MindSpore没有 out 参数。MindSpore的 axis 有默认值,在 axis 是默认值情况下,求 x 所有元素的最大值。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/amax.html", + ".amin": "MindSpore:根据指定 axis,求 x 的最小值元素。keepdims 功能和PyTorch一致。MindSpore没有 out 参数。MindSpore的 axis 有默认值,在 axis 是默认值情况下,求 x 所有元素的最小值。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/amin.html", + ".any": "MindSpore:根据指定 axis,对 x 的元素进行逻辑或。keep_dims 功能和PyTorch一致。MindSpore没有 out 参数。MindSpore的 axis 有默认值,在 axis 是默认值情况下,对 x 所有元素进行逻辑或。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/any.html", + ".argmin": "MindSpore:MindSpore此API实现功能与PyTorch基本一致,返回值类型为int32. 
https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/argmin.html", + ".broadcast_to": "MindSpore:MindSpore此API实现功能与PyTorch基本一致,额外支持shape中存在-1维度的情况。如果目标shape中有-1维度,它被该维度中的输入shape的值替换。如果目标shape中有-1维度,则-1维度不能位于一个不存在的维度中。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/broadcast_to.html", + ".diag": "MindSpore:MindSpore此API,若输入为一维张量,则实现与PyTorch相同的功能;若输入为矩阵,则不能实现与PyTorch相同的功能,且没有diagonal参数控制对角线的位置。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/diag.html", + ".expand": "MindSpore:shape 为广播后的目标shape,其类型可以为 tuple[int] 。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/expand.html", + ".flip": "MindSpore:mindspore.flip与mindspore.Tensor.flip接口功能与torch.flip一致,均不支持入参为int的场景。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/flip.html", + ".float_power": "MindSpore:如果两个输入都是实数,MindSpore此API实现功能与PyTorch一致,仅参数名不同。目前不支持复数运算。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/float_power.html", + ".ger": "MindSpore: 参数 input 和 other 的数据类型支持float16/32/64,必须是相同的数据类型,返回值的数据类型和输入一致。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/ger.html", + ".item": "MindSpore:返回Tensor中指定index的值,适用于一个或多个元素的Tensor。返回值仍为Tensor。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/item.html", + ".masked_scatter": "MindSpore:MindSpore此API实现功能与PyTorch基本一致。但是PyTorch支持 mask 与Tensor本身的双向广播, https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/masked_scatter.html", + ".max": "该算子MindSpore与Pytorch存在差异,详见 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/tensor_max.html", + ".median": "MindSpore:根据指定 axis,输出 input 的中值与索引。keepdims 功能和PyTorch一致。与Pytorch不同,不论输入包含不包含 axis,MindSpore返回指定维度上的中值与索引。MindSpore没有 out 参数。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/median.html", + ".min": "该算子MindSpore与Pytorch存在差异,详见 
https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/tensor_min.html", + ".prod": "MindSpore:根据指定 axis,对 input 中元素求乘积。keep_dims 功能和PyTorch一致。MindSpore没有 dtype 参数。MindSpore的 axis 有默认值,在 axis 是默认值情况下,对 input 所有元素求乘积。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/prod.html", + ".repeat": "该算子MindSpore与Pytorch存在差异,详见 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/tensor_repeat.html", + ".scatter_": "MindSpore:MindSpore此算子实现功能与PyTorch一致,PyTorch中该接口为Tensor接口,调用方式略有不同。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/tensor_scatter_elements.html", + ".scatter": "MindSpore: index 的shape必须和 src 的shape一致,即 src 的所有数据都会被 index 分散到 input 里。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/scatter.html", + ".std": "MindSpore:输出Tensor各维度上的标准差,也可以按照 axis 对指定维度求标准差。如果 ddof 是布尔值,和 unbiased 作用相同; ddof 为整数,计算中使用的除数是 N−ddof,其中N表示元素的数量。keepdim 控制输出和输入的维度是否相同。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/std.html", + ".svd": "MindSpore: https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/svd.html", + ".take": "MindSpore:在指定维度上获取Tensor中的元素。可以指定维度,默认使用展开的输入数组。若索引超出范围:mode为’raise’时,则抛出异常;mode为’wrap’时,绕接;mode为’clip’时,裁剪到范围。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/take.html", + ".to": "MindSpore:仅支持 dtype 参数,返回指定数据类型的Tensor。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/to.html", + ".var": "MindSpore:输出Tensor各维度上的方差,也可以按照 axis 对指定维度求方差。如果 ddof 是布尔值,和 unbiased 作用相同; ddof 为整数,计算中使用的除数是 N−ddof,其中N表示元素的数量。keepdim 控制输出和输入的维度是否相同。 https://www.mindspore.cn/docs/zh-CN/master/note/api_mapping/pytorch_diff/var.html" +} \ No newline at end of file diff --git a/mindconverter/mindconverter/warn_info/unsupported_warn_infos.json b/mindconverter/mindconverter/warn_info/unsupported_warn_infos.json new file mode 100644 index 
0000000000000000000000000000000000000000..57a10df73b26738f81e33ef405035b8beebd6f34 --- /dev/null +++ b/mindconverter/mindconverter/warn_info/unsupported_warn_infos.json @@ -0,0 +1,3 @@ +{ + "torch.no_grad": "Mindspore 不支持这个算子" +} \ No newline at end of file diff --git a/mindconverter/requirements.txt b/mindconverter/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..46ce6e06cab0b6225ab8529418b4e8b1f45c239f --- /dev/null +++ b/mindconverter/requirements.txt @@ -0,0 +1,7 @@ +google-pasta +numpy>=1.17.0 +protobuf>=3.13.0 +yapf +networkx +fuzzywuzzy +beautifulsoup4 \ No newline at end of file diff --git a/mindconverter/setup.py b/mindconverter/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..60c429933853e09ee2897499fda7250b5c702158 --- /dev/null +++ b/mindconverter/setup.py @@ -0,0 +1,221 @@ +# Copyright 2019-2021 Huawei Technologies Co., Ltd.All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Setup.""" + +import sys +import os +import shutil +import stat +import platform +import shlex +import subprocess +import types +from importlib import import_module +from setuptools import setup +from setuptools.command.egg_info import egg_info +from setuptools.command.build_py import build_py +from setuptools.command.install import install + + +def get_readme_content(): + pwd = os.path.dirname(os.path.realpath(__file__)) + with open(os.path.join(pwd, 'README.md'), encoding='UTF-8') as f: + return f.read() + + +def get_version(): + """ + Get version. + + Returns: + str, mindconverter version. + """ + machinery = import_module('importlib.machinery') + module_path = os.path.join(os.path.dirname(__file__), 'mindconverter', '_version.py') + module_name = '__mindconverterversion__' + + version_module = types.ModuleType(module_name) + loader = machinery.SourceFileLoader(module_name, module_path) + loader.exec_module(version_module) + return version_module.VERSION + + +def get_platform(): + """ + Get platform name. + + Returns: + str, platform name in lowercase. + """ + return platform.system().strip().lower() + + +def get_description(): + """ + Get description. + + Returns: + str, wheel package description. + """ + os_info = get_platform() + cpu_info = platform.machine().strip() + + cmd = "git log --format='[sha1]:%h, [branch]:%d' -1" + process = subprocess.Popen( + shlex.split(cmd), + shell=False, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) + stdout, _ = process.communicate() + if not process.returncode: + git_version = stdout.decode().strip() + return 'mindconverter platform: %s, cpu: %s, git version: %s' % (os_info, cpu_info, git_version) + + return 'mindconverter platform: %s, cpu: %s' % (os_info, cpu_info) + + +def get_install_requires(): + """ + Get install requirements. + + Returns: + list, list of dependent packages. 
+ """ + with open('requirements.txt') as file: + return file.read().strip().splitlines() + + +def update_permissions(path): + """ + Update permissions. + + Args: + path (str): Target directory path. + """ + for dirpath, dirnames, filenames in os.walk(path): + for dirname in dirnames: + dir_fullpath = os.path.join(dirpath, dirname) + os.chmod(dir_fullpath, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC | stat.S_IRGRP | stat.S_IXGRP) + for filename in filenames: + file_fullpath = os.path.join(dirpath, filename) + os.chmod(file_fullpath, stat.S_IREAD) + + +def run_script(script): + """ + Run script. + + Args: + script (str): Target script file path. + + Returns: + int, return code. + """ + cmd = '/bin/bash {}'.format(script) + process = subprocess.Popen( + shlex.split(cmd), + shell=False + ) + return process.wait() + + +class EggInfo(egg_info): + """Egg info.""" + + def run(self): + egg_info_dir = os.path.join(os.path.dirname(__file__), 'mindconverter.egg-info') + shutil.rmtree(egg_info_dir, ignore_errors=True) + super().run() + update_permissions(egg_info_dir) + + +class BuildPy(build_py): + """Build py files.""" + + def run(self): + mindconverter_lib_dir = os.path.join(os.path.dirname(__file__), 'build', 'lib', 'mindconverter') + shutil.rmtree(mindconverter_lib_dir, ignore_errors=True) + super().run() + update_permissions(mindconverter_lib_dir) + + +class Install(install): + """Install.""" + + def run(self): + super().run() + if sys.argv[-1] == 'install': + pip = import_module('pip') + mindconverter_dir = os.path.join(os.path.dirname(pip.__path__[0]), 'mindconverter') + update_permissions(mindconverter_dir) + + +if __name__ == '__main__': + version_info = sys.version_info + if (version_info.major, version_info.minor) < (3, 7): + sys.stderr.write('Python version should be at least 3.7\r\n') + sys.exit(1) + + setup( + name='mindconverter', + version=get_version(), + author='The MindSpore Authors', + author_email='contact@mindspore.cn', + url='https://www.mindspore.cn', + 
download_url='待修改', + project_urls={ + 'Sources': '待修改', + 'Issue Tracker': '待修改', + }, + description=get_description(), + long_description=get_readme_content(), + long_description_content_type="text/markdown", + packages=['mindconverter','mindconverter.common','mindconverter.conf','mindconverter.mappings','mindconverter.ops','mindconverter.utils','mindconverter.warn_info'], + platforms=[get_platform()], + include_package_data=True, + cmdclass={ + 'egg_info': EggInfo, + 'build_py': BuildPy, + 'install': Install, + }, + entry_points={ + 'console_scripts': [ + 'mindconverter=mindconverter.cli:cli_entry', + ], + }, + python_requires='>=3.7', + install_requires=get_install_requires(), + classifiers=[ + 'Development Status :: 5 - Production/Stable', + 'Environment :: Console', + 'Environment :: Web Environment', + 'Intended Audience :: Science/Research', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: Apache Software License', + 'Programming Language :: Python :: 3 :: Only', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Topic :: Scientific/Engineering', + 'Topic :: Scientific/Engineering :: Artificial Intelligence', + 'Topic :: Software Development', + 'Topic :: Software Development :: Libraries', + 'Topic :: Software Development :: Libraries :: Python Modules', + ], + license='Apache 2.0', + keywords='mindconverter', + ) \ No newline at end of file