Remove unused argument --target_platform (#1279)
Signed-off-by: Partho Sarthi <[email protected]>
parthosa authored Aug 12, 2024
1 parent 46f7cab commit bec93f0
Showing 4 changed files with 2 additions and 38 deletions.
6 changes: 0 additions & 6 deletions user_tools/src/spark_rapids_pytools/cloud_api/sp_types.py
@@ -87,12 +87,6 @@ class ClusterState(EnumeratedType):
     UNKNOWN = 'unknown'
 
 
-class TargetPlatform(EnumeratedType):
-    """Determine CostSavings for target platform based on OnPrem cluster configuration"""
-    DATAPROC = 'dataproc'
-    NONE = None
-
-
 class SparkNodeType(EnumeratedType):
     """
     Node type from Spark perspective. We either have a master node or a worker node.
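For context on the deletion above: the EnumeratedType base class is not part of this diff, so the sketch below is a hedged stand-in using the stdlib Enum. It only illustrates what the removed class amounted to; once nothing reads TargetPlatform, deleting it cannot change behavior.

# Illustrative stand-in for the deleted class, using Python's stdlib Enum
# rather than the project's EnumeratedType (whose code is not shown here).
from enum import Enum

class TargetPlatform(Enum):
    """Determine CostSavings for target platform based on OnPrem cluster configuration"""
    DATAPROC = 'dataproc'
    NONE = None

print(TargetPlatform.DATAPROC.value)  # -> 'dataproc'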
2 changes: 0 additions & 2 deletions user_tools/src/spark_rapids_pytools/rapids/qualification.py
@@ -275,8 +275,6 @@ def _process_custom_args(self) -> None:
         cuda_arg = self.wrapper_options.get('cuda')
         if cuda_arg is not None:
             cuda = cuda_arg
-        target_platform = self.wrapper_options.get('targetPlatform')
-        self.ctxt.set_ctxt('targetPlatform', target_platform)
         self.ctxt.set_ctxt('gpuPerMachine', gpu_per_machine)
         self.ctxt.set_ctxt('gpuDevice', gpu_device)
         self.ctxt.set_ctxt('cuda', cuda)
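The set_ctxt calls write into a shared key/value tool context. A minimal sketch of that pattern (the real ToolContext class is not shown in this diff, so the names below are assumptions) shows why the 'targetPlatform' write has to be removed together with all of its readers:

# Hedged sketch of a key/value tool context; illustrative only.
from typing import Any, Dict, Optional

class ToolContext:
    def __init__(self) -> None:
        self._ctxt: Dict[str, Any] = {}

    def set_ctxt(self, key: str, value: Any) -> None:
        self._ctxt[key] = value

    def get_ctxt(self, key: str, default: Optional[Any] = None) -> Any:
        return self._ctxt.get(key, default)

ctxt = ToolContext()
ctxt.set_ctxt('gpuPerMachine', 2)
# After this commit nothing writes 'targetPlatform', so a leftover reader
# would silently get the default instead of a user-supplied value:
print(ctxt.get_ctxt('targetPlatform'))  # -> None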
22 changes: 1 addition & 21 deletions user_tools/src/spark_rapids_tools/cmdli/argprocessor.py
@@ -446,15 +446,13 @@ class QualifyUserArgModel(ToolUserArgModel):
     Represents the arguments collected by the user to run the qualification tool.
     This is used as doing preliminary validation against some of the common pattern
     """
-    target_platform: Optional[CspEnv] = None
     filter_apps: Optional[QualFilterApp] = None
     gpu_cluster_recommendation: Optional[QualGpuClusterReshapeType] = None
     estimation_model_args: Optional[Dict] = dataclasses.field(default_factory=dict)
 
     def init_tool_args(self) -> None:
         self.p_args['toolArgs']['platform'] = self.platform
         self.p_args['toolArgs']['savingsCalculations'] = False
-        self.p_args['toolArgs']['targetPlatform'] = self.target_platform
         # check the filter_apps argument
         if self.filter_apps is None:
             self.p_args['toolArgs']['filterApps'] = QualFilterApp.get_default()
@@ -487,22 +485,6 @@ def build_tools_args(self) -> dict:
         # At this point, if the platform is still none, then we can set it to the default value
         # which is the onPrem platform.
         runtime_platform = self.get_or_set_platform()
-        # check the targetPlatform argument
-        if self.p_args['toolArgs']['targetPlatform']:
-            equivalent_pricing_list = runtime_platform.get_equivalent_pricing_platform()
-            if not equivalent_pricing_list:
-                # no target_platform for that runtime environment
-                self.logger.info(
-                    'Argument target_platform does not support the current cluster [%s]', runtime_platform)
-                self.p_args['toolArgs']['targetPlatform'] = None
-            else:
-                if not self.p_args['toolArgs']['targetPlatform'] in equivalent_pricing_list:
-                    target_platform = self.p_args['toolArgs']['targetPlatform']
-                    raise PydanticCustomError(
-                        'invalid_argument',
-                        f'The platform [{target_platform}] is currently '
-                        f'not supported to calculate savings from [{runtime_platform}] cluster\n  Error:')
 
         # process JVM arguments
         self.process_jvm_args()
@@ -512,9 +494,7 @@ def build_tools_args(self) -> dict:
             'outputFolder': self.output_folder,
             'platformOpts': {
                 'credentialFile': None,
-                'deployMode': DeployMode.LOCAL,
-                # used to be sent to the scala core java cmd
-                'targetPlatform': self.p_args['toolArgs']['targetPlatform']
+                'deployMode': DeployMode.LOCAL
             },
             'migrationClustersProps': {
                 'cpuCluster': self.cluster,
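The deleted block rejected a target_platform that had no pricing equivalence to the runtime platform, using PydanticCustomError from pydantic v2. Below is a self-contained sketch of that validation pattern with illustrative names; `supported_targets` stands in for the result of runtime_platform.get_equivalent_pricing_platform():

# Hedged sketch of the removed validation pattern (names are illustrative).
from pydantic_core import PydanticCustomError

def validate_target_platform(target: str, runtime: str,
                             supported_targets: list) -> None:
    """Raise a typed pydantic error when `target` has no pricing
    equivalence to the `runtime` platform."""
    if target not in supported_targets:
        raise PydanticCustomError(
            'invalid_argument',
            f'The platform [{target}] is currently not supported '
            f'to calculate savings from [{runtime}] cluster')

validate_target_platform('dataproc', 'onprem', ['dataproc'])  # passes
# validate_target_platform('emr', 'onprem', ['dataproc'])     # raises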
10 changes: 1 addition & 9 deletions user_tools/src/spark_rapids_tools/cmdli/tools_cli.py
@@ -39,7 +39,6 @@ def qualification(self,
                       eventlogs: str = None,
                       cluster: str = None,
                       platform: str = None,
-                      target_platform: str = None,
                       output_folder: str = None,
                       filter_apps: str = None,
                       custom_model_file: str = None,
@@ -66,12 +65,6 @@ def qualification(self,
                 Name or ID (for databricks platforms) of cluster or path to cluster-properties.
         :param platform: defines one of the following "onprem", "emr", "dataproc", "dataproc-gke",
                 "databricks-aws", and "databricks-azure".
-        :param target_platform: Cost savings and speedup recommendation for comparable cluster in
-                target_platform based on on-premises cluster configuration.
-                Currently only `dataproc` is supported for target_platform.
-                If not provided, the final report will be limited to GPU speedups only without
-                cost-savings.
         :param output_folder: path to store the output
         :param tools_jar: Path to a bundled jar including Rapids tool. The path is a local filesystem,
                 or remote cloud storage url. If missing, the wrapper downloads the latest rapids-4-spark-tools_*.jar
@@ -97,7 +90,7 @@ def qualification(self,
             https://docs.nvidia.com/spark-rapids/user-guide/latest/qualification/jar-usage.html#running-the-qualification-tool-standalone-on-spark-event-logs
         """
         platform = Utils.get_value_or_pop(platform, rapids_options, 'p')
-        target_platform = Utils.get_value_or_pop(target_platform, rapids_options, 't')
+        tools_jar = Utils.get_value_or_pop(tools_jar, rapids_options, 't')
         output_folder = Utils.get_value_or_pop(output_folder, rapids_options, 'o')
         filter_apps = Utils.get_value_or_pop(filter_apps, rapids_options, 'f')
         verbose = Utils.get_value_or_pop(verbose, rapids_options, 'v', False)
@@ -117,7 +110,6 @@ def qualification(self,
             eventlogs=eventlogs,
             cluster=cluster,
             platform=platform,
-            target_platform=target_platform,
             output_folder=output_folder,
             tools_jar=tools_jar,
             jvm_heap_size=jvm_heap_size,
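The user-visible effect of the last two hunks is that the short alias 't' in rapids_options now resolves to tools_jar instead of the removed target_platform. A minimal sketch of the get_value_or_pop pattern, assuming (the real Utils implementation is not in this diff) it prefers the explicitly passed value and otherwise pops the short-flag alias:

# Hedged stand-in for Utils.get_value_or_pop; illustrative only.
from typing import Any, Dict, Optional

def get_value_or_pop(provided: Optional[Any],
                     options: Dict[str, Any],
                     short_flag: str,
                     default: Optional[Any] = None) -> Optional[Any]:
    """Return `provided` if set; otherwise remove and return
    options[short_flag], falling back to `default`."""
    if provided is not None:
        return provided
    return options.pop(short_flag, default)

# With --target_platform gone, 't' is free to alias tools_jar:
rapids_options = {'t': 'rapids-4-spark-tools_2.12.jar'}
tools_jar = get_value_or_pop(None, rapids_options, 't')
print(tools_jar)       # -> rapids-4-spark-tools_2.12.jar
print(rapids_options)  # -> {} (alias consumed)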
