From 2029eff54053bbe5e05953bd1f12de6b2546e010 Mon Sep 17 00:00:00 2001 From: Zhuoran Zhao Date: Mon, 29 Apr 2024 12:27:29 -0700 Subject: [PATCH] Disable AITModel in fx2ait on AMD Summary: as title, there are still some utility functions needed from fx2ait for aot_Inductor_lower before we fully move everything to PT2 full stack Reviewed By: chenyang78 Differential Revision: D56613348 --- fx2ait/fx2ait/extension.py | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/fx2ait/fx2ait/extension.py b/fx2ait/fx2ait/extension.py index b45c98d21..dc1783067 100644 --- a/fx2ait/fx2ait/extension.py +++ b/fx2ait/fx2ait/extension.py @@ -43,14 +43,18 @@ def _get_extension_path(lib_name): return ext_specs.origin -try: - torch.ops.load_library("//deeplearning/ait:AITModel") - logger.info("===Load non-OSS AITModel===") - -except (ImportError, OSError): - lib_path = _get_extension_path("libait_model") - torch.ops.load_library(lib_path) - logger.info("===Load OSS AITModel===") - - def is_oss_ait_model(): # noqa: F811 - return True +if torch.version.hip is None: + # For Meta internal workloads, we don't have an active plan to apply AITemplate on AMD GPUs. + # As such, for AMD build we skip all AITemplate related supports. T186819748 is used to + # track the plans/strategies for AITemplate enablement on AMD GPUs if needed in the future. + try: + torch.ops.load_library("//deeplearning/ait:AITModel") + logger.info("===Load non-OSS AITModel===") + + except (ImportError, OSError): + lib_path = _get_extension_path("libait_model") + torch.ops.load_library(lib_path) + logger.info("===Load OSS AITModel===") + + def is_oss_ait_model(): # noqa: F811 + return True