
Commit

Merge branch 'XuehaiPan:main' into main
Junyi-99 authored Mar 27, 2024
2 parents 984ca6d + 201caef commit ef7e925
Showing 7 changed files with 21 additions and 25 deletions.
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -25,7 +25,7 @@ repos:
       - id: debug-statements
       - id: double-quote-string-fixer
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.3.0
+    rev: v0.3.2
     hooks:
       - id: ruff
         args: [--fix, --exit-non-zero-on-fix]
2 changes: 1 addition & 1 deletion .readthedocs.yaml
@@ -7,7 +7,7 @@ version: 2
 
 # Set the version of Python and other tools you might need
 build:
-  os: ubuntu-22.04
+  os: ubuntu-lts-latest
   tools:
     python: "3.8"
   jobs:
4 changes: 2 additions & 2 deletions nvitop/api/device.py
@@ -746,7 +746,7 @@ def __init__(
 
     def __repr__(self) -> str:
         """Return a string representation of the device."""
-        return '{}(index={}, name="{}", total_memory={})'.format(
+        return '{}(index={}, name={!r}, total_memory={})'.format(  # noqa: UP032
             self.__class__.__name__,
             self.index,
             self.name(),
@@ -2955,7 +2955,7 @@ def __init__(
 
     def __repr__(self) -> str:
         """Return a string representation of the CUDA device."""
-        return '{}(cuda_index={}, nvml_index={}, name="{}", total_memory={})'.format(
+        return '{}(cuda_index={}, nvml_index={}, name="{}", total_memory={})'.format(  # noqa: UP032
             self.__class__.__name__,
             self.cuda_index,
             self.index,
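A minimal sketch (not part of the commit; the device name is invented) of why the first hunk swaps the hard-coded quotes in name="{}" for name={!r}: the !r conversion applies repr(), which quotes and escapes the string itself, so names containing quotes stay unambiguous.

    # Sketch only: '{!r}' vs a hard-coded '"{}"' when embedding a string in __repr__ output.
    name = 'NVIDIA "Tesla" V100'  # hypothetical device name containing quotes

    print('name="{}"'.format(name))  # name="NVIDIA "Tesla" V100"  (quoting is ambiguous)
    print('name={!r}'.format(name))  # name='NVIDIA "Tesla" V100'  (repr escapes it properly)

The added # noqa: UP032 comments suppress ruff's UP032 (use-f-string) rule, which the bumped ruff-pre-commit hook above would otherwise apply to these deliberately kept .format() calls.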
14 changes: 6 additions & 8 deletions nvitop/api/libcuda.py
@@ -259,10 +259,9 @@ def __repr__(self) -> str:
             )
             if self.value not in CUDAError._errcode_to_name:
                 CUDAError._errcode_to_name[self.value] = cuGetErrorName(self.value)
-            return '{} Code: {} ({}).'.format(
-                CUDAError._errcode_to_string[self.value],
-                CUDAError._errcode_to_name[self.value],
-                self.value,
+            return (
+                f'{CUDAError._errcode_to_string[self.value]} '
+                f'Code: {CUDAError._errcode_to_name[self.value]} ({self.value}).'
             )
         except CUDAError:
             return f'CUDA Error with code {self.value}.'
@@ -316,10 +315,9 @@ def new(cls: type[CUDAError]) -> CUDAError:
     new_error_class = type(class_name, (CUDAError,), {'__new__': gen_new(err_val)})
     new_error_class.__module__ = __name__
     if err_val in CUDAError._errcode_to_string:
-        new_error_class.__doc__ = 'CUDA Error: {} Code: :data:`{}` ({}).'.format(
-            CUDAError._errcode_to_string[err_val],
-            err_name,
-            err_val,
+        new_error_class.__doc__ = (
+            f'CUDA Error: {CUDAError._errcode_to_string[err_val]} '
+            f'Code: :data:`{err_name}` ({err_val}).'
         )
     else:
         new_error_class.__doc__ = f'CUDA Error with code :data:`{err_name}` ({err_val})'
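As a hedged illustration (not from the commit; the error-code tables below are made-up stand-ins for CUDAError's internal _errcode_to_string / _errcode_to_name mappings), the new f-string form assembles exactly the same message as the removed .format() call; the same rewrite is mirrored in nvitop/api/libcudart.py below.

    # Sketch only: compares the removed .format() call with the new f-string form.
    _errcode_to_string = {100: 'No CUDA-capable device is detected.'}  # hypothetical entry
    _errcode_to_name = {100: 'CUDA_ERROR_NO_DEVICE'}                   # hypothetical entry
    value = 100

    old = '{} Code: {} ({}).'.format(
        _errcode_to_string[value],
        _errcode_to_name[value],
        value,
    )
    new = (
        f'{_errcode_to_string[value]} '
        f'Code: {_errcode_to_name[value]} ({value}).'
    )
    assert old == new  # only the formatting style changes, not the rendered message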
14 changes: 6 additions & 8 deletions nvitop/api/libcudart.py
@@ -307,10 +307,9 @@ def __repr__(self) -> str:
             )
             if self.value not in cudaError._errcode_to_name:
                 cudaError._errcode_to_name[self.value] = cudaGetErrorName(self.value)
-            return '{} Code: {} ({}).'.format(
-                cudaError._errcode_to_string[self.value],
-                cudaError._errcode_to_name[self.value],
-                self.value,
+            return (
+                f'{cudaError._errcode_to_string[self.value]} '
+                f'Code: {cudaError._errcode_to_name[self.value]} ({self.value}).'
             )
         except cudaError:
             return f'CUDA Error with code {self.value}.'
@@ -367,10 +366,9 @@ def new(cls: type[cudaError]) -> cudaError:
     new_error_class = type(class_name, (cudaError,), {'__new__': gen_new(err_val)})
     new_error_class.__module__ = __name__
     if err_val in cudaError._errcode_to_string:
-        new_error_class.__doc__ = 'cudaError: {} Code: :data:`{}` ({}).'.format(
-            cudaError._errcode_to_string[err_val],
-            err_name,
-            err_val,
+        new_error_class.__doc__ = (
+            f'cudaError: {cudaError._errcode_to_string[err_val]} '
+            f'Code: :data:`{err_name}` ({err_val}).'
         )
     else:
         new_error_class.__doc__ = f'CUDA Error with code :data:`{err_name}` ({err_val})'
2 changes: 1 addition & 1 deletion nvitop/api/process.py
@@ -535,7 +535,7 @@ def __init__(
 
     def __repr__(self) -> str:
         """Return a string representation of the GPU process."""
-        return '{}(pid={}, gpu_memory={}, type={}, device={}, host={})'.format(
+        return '{}(pid={}, gpu_memory={}, type={}, device={}, host={})'.format(  # noqa: UP032
             self.__class__.__name__,
             self.pid,
             self.gpu_memory_human(),
8 changes: 4 additions & 4 deletions nvitop/gui/screens/metrics.py
@@ -148,31 +148,31 @@ def format_max_cpu_percent(value):
         def format_host_memory(value):
             if value is NA:
                 return f'HOST-MEM: {value}'
-            return 'HOST-MEM: {} ({:.1f}%)'.format(
+            return 'HOST-MEM: {} ({:.1f}%)'.format(  # noqa: UP032
                 bytes2human(value),
                 round(100.0 * value / total_host_memory, 1),
             )
 
         def format_max_host_memory(value):
             if value is NA:
                 return f'MAX HOST-MEM: {value}'
-            return 'MAX HOST-MEM: {} ({:.1f}%) / {}'.format(
+            return 'MAX HOST-MEM: {} ({:.1f}%) / {}'.format(  # noqa: UP032
                 bytes2human(value),
                 round(100.0 * value / total_host_memory, 1),
                 total_host_memory_human,
             )
 
         def format_gpu_memory(value):
             if value is not NA and total_gpu_memory is not NA:
-                return 'GPU-MEM: {} ({:.1f}%)'.format(
+                return 'GPU-MEM: {} ({:.1f}%)'.format(  # noqa: UP032
                     bytes2human(value),
                     round(100.0 * value / total_gpu_memory, 1),
                 )
             return f'GPU-MEM: {value}'
 
         def format_max_gpu_memory(value):
             if value is not NA and total_gpu_memory is not NA:
-                return 'MAX GPU-MEM: {} ({:.1f}%) / {}'.format(
+                return 'MAX GPU-MEM: {} ({:.1f}%) / {}'.format(  # noqa: UP032
                     bytes2human(value),
                     round(100.0 * value / total_gpu_memory, 1),
                     total_gpu_memory_human,
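For reference, a minimal sketch (not part of the commit; bytes2human is a simplified stand-in and the numbers are invented) of what the suppressed UP032 auto-fix would turn one of the kept .format() calls into; keeping the multi-line .format() form with # noqa presumably trades the f-string for readability here.

    # Sketch only: the kept .format() call and its would-be f-string rewrite agree.
    def bytes2human(n):  # simplified stand-in for nvitop's bytes2human helper
        return f'{n / 1024**3:.1f}GiB'

    value = 4 * 1024**3               # hypothetical process memory usage in bytes
    total_host_memory = 16 * 1024**3  # hypothetical host memory total in bytes

    old = 'HOST-MEM: {} ({:.1f}%)'.format(
        bytes2human(value),
        round(100.0 * value / total_host_memory, 1),
    )
    new = f'HOST-MEM: {bytes2human(value)} ({round(100.0 * value / total_host_memory, 1):.1f}%)'
    assert old == new  # both render as: HOST-MEM: 4.0GiB (25.0%)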
