Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Accelerate with AMD GPUs #512

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 6 additions & 2 deletions src/python_run/piper/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,11 @@ def main() -> None:
"--noise-w", "--noise_w", type=float, help="Phoneme width noise"
)
#
parser.add_argument("--cuda", action="store_true", help="Use GPU")
parser.add_argument("--cuda", action="store_true", help="Use Nvidia GPU")
#
parser.add_argument("--migraphx", action="store_true", help="Use AMD GPU")
#
parser.add_argument("--rocm", action="store_true", help="Use ROCm-enabled GPU")
#
parser.add_argument(
"--sentence-silence",
Expand Down Expand Up @@ -105,7 +109,7 @@ def main() -> None:
args.model, args.config = find_voice(args.model, args.data_dir)

# Load voice
voice = PiperVoice.load(args.model, config_path=args.config, use_cuda=args.cuda)
voice = PiperVoice.load(args.model, config_path=args.config, use_cuda=args.cuda, use_rocm=args.rocm, use_migraphx=args.migraphx)
synthesize_args = {
"speaker_id": args.speaker,
"length_scale": args.length_scale,
Expand Down
8 changes: 6 additions & 2 deletions src/python_run/piper/http_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,11 @@ def main() -> None:
"--noise-w", "--noise_w", type=float, help="Phoneme width noise"
)
#
parser.add_argument("--cuda", action="store_true", help="Use GPU")
parser.add_argument("--cuda", action="store_true", help="Use Nvidia GPU")
#
parser.add_argument("--migraphx", action="store_true", help="Use AMD GPU")
#
parser.add_argument("--rocm", action="store_true", help="Use ROCm-enabled GPU")
#
parser.add_argument(
"--sentence-silence",
Expand Down Expand Up @@ -90,7 +94,7 @@ def main() -> None:
args.model, args.config = find_voice(args.model, args.data_dir)

# Load voice
voice = PiperVoice.load(args.model, config_path=args.config, use_cuda=args.cuda)
voice = PiperVoice.load(args.model, config_path=args.config, use_cuda=args.cuda, use_rocm=args.rocm, use_migraphx=args.migraphx)
synthesize_args = {
"speaker_id": args.speaker,
"length_scale": args.length_scale,
Expand Down
34 changes: 34 additions & 0 deletions src/python_run/piper/voice.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,8 @@ def load(
model_path: Union[str, Path],
config_path: Optional[Union[str, Path]] = None,
use_cuda: bool = False,
use_rocm: bool = False,
use_migraphx: bool = False,
) -> "PiperVoice":
"""Load an ONNX model and config."""
if config_path is None:
Expand All @@ -42,6 +44,38 @@ def load(
{"cudnn_conv_algo_search": "HEURISTIC"},
)
]
elif use_rocm:
"""
To support ROCm-enabled GPUs via 'ROCMExecutionProvider' or 'MIGraphXExecutionProvider':
1. Install piper-tts
> pip install piper-tts
2. Uninstall onnxruntime
> pip uninstall onnxruntime
3. Install onnxruntime-rocm
> pip3 install https://repo.radeon.com/rocm/manylinux/rocm-rel-6.0.2/onnxruntime_rocm-inference-1.17.0-cp310-cp310-linux_x86_64.whl --no-cache-dir
Remarks: Wheel files for other ROCm versions are available at: https://repo.radeon.com/rocm/manylinux

To verify the installation:
> python3
>>> import onnxruntime
>>> onnxruntime.get_available_providers()
Output:
```
['MIGraphXExecutionProvider', 'ROCMExecutionProvider', 'CPUExecutionProvider']
```

To accelerate with AMD GPUs:
> piper --migraphx

To accelerate with ROCm-enabled GPUs:
> piper --rocm

Remarks: Tested on Ubuntu 22.04.4 + Kernel 6.6.32 + ROCm 6.0.2
Setup notes are available at: https://github.com/eliranwong/MultiAMDGPU_AIDev_Ubuntu/tree/main
"""
providers = ["ROCMExecutionProvider", "CPUExecutionProvider"]
elif use_migraphx:
providers = ["MIGraphXExecutionProvider", "CPUExecutionProvider"]
else:
providers = ["CPUExecutionProvider"]

Expand Down