{"payload":{"pageCount":4,"repositories":[{"type":"Public","name":"NeMo","owner":"NVIDIA","isFork":false,"description":"A scalable generative AI framework built for researchers and developers working on Large Language Models, Multimodal, and Speech AI (Automatic Speech Recognition and Text-to-Speech)","allTopics":["machine-translation","tts","speech-synthesis","neural-networks","deeplearning","speaker-recognition","asr","multimodal","speech-translation","large-language-models","speaker-diariazation","generative-ai"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":89,"issueCount":52,"starsCount":10336,"forksCount":2210,"license":"Apache License 2.0","participation":[19,9,18,19,8,22,18,10,17,25,17,19,18,28,9,19,34,21,23,21,13,9,5,13,5,7,10,20,9,4,13,17,23,21,10,15,30,29,30,20,37,26,10,26,36,28,22,40,31,32,17,26],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-04T01:36:07.044Z"}},{"type":"Public","name":"cloudai","owner":"NVIDIA","isFork":false,"description":"CloudAI Benchmark Framework","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":7,"issueCount":0,"starsCount":15,"forksCount":8,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,49,35,53],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-04T01:55:49.060Z"}},{"type":"Public","name":"workbench-example-hybrid-rag","owner":"NVIDIA","isFork":false,"description":"An NVIDIA AI Workbench example project for Retrieval Augmented Generation (RAG)","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":3,"starsCount":45,"forksCount":123,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8,0,0,0,0,0,0,0,1,2,0,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-03T23:53:26.713Z"}},{"type":"Public","name":"NVFlare","owner":"NVIDIA","isFork":false,"description":"NVIDIA Federated Learning Application Runtime Environment","allTopics":["python"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":9,"issueCount":24,"starsCount":554,"forksCount":149,"license":"Apache License 2.0","participation":[12,3,9,0,11,8,10,11,14,10,12,11,11,13,16,23,18,4,6,8,14,16,14,8,7,13,8,13,17,4,13,9,5,11,13,3,1,5,1,7,1,11,2,13,8,16,2,13,8,6,7,8],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-03T23:32:22.597Z"}},{"type":"Public","name":"NeMo-Curator","owner":"NVIDIA","isFork":false,"description":"Scalable toolkit for data curation","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":9,"issueCount":28,"starsCount":283,"forksCount":29,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,5,1,4,1,2,4,5,4,1,6,4],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-04T01:38:04.152Z"}},{"type":"Public","name":"TransformerEngine","owner":"NVIDIA","isFork":false,"description":"A library for accelerating Transformer models on NVIDIA GPUs, including using 8-bit floating point (FP8) precision on Hopper and Ada GPUs, to provide better performance with lower memory utilization in both training and 
inference.","allTopics":["python","machine-learning","deep-learning","gpu","cuda","pytorch","jax","fp8"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":30,"issueCount":95,"starsCount":1514,"forksCount":232,"license":"Apache License 2.0","participation":[10,7,12,3,2,11,10,5,5,10,8,4,2,3,2,9,6,10,9,9,4,3,9,7,2,13,4,13,0,0,11,5,12,10,12,7,5,6,8,7,5,6,3,13,5,14,9,9,6,9,7,10],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-04T01:21:55.340Z"}},{"type":"Public","name":"earth2studio","owner":"NVIDIA","isFork":false,"description":"Open-source deep-learning framework for exploring, building and deploying AI weather/climate workflows.","allTopics":["weather","ai","deep-learning","climate-science"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":3,"starsCount":33,"forksCount":5,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,21,8,5,2,10,2,3,7],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-03T21:14:19.833Z"}},{"type":"Public","name":"NeMo-Aligner","owner":"NVIDIA","isFork":false,"description":"Scalable toolkit for efficient model alignment","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":21,"issueCount":40,"starsCount":286,"forksCount":32,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,9,2,12,3,1,3,1,1,2,0,7,0,3,0,2,3,2,3,2,2,4,3,3,2,5,0,1,1,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-03T20:16:24.239Z"}},{"type":"Public","name":"modulus","owner":"NVIDIA","isFork":false,"description":"Open-source deep-learning framework for building, training, and fine-tuning deep learning models using state-of-the-art Physics-ML methods","allTopics":["machine-learning","deep-learning","physics","pytorch","nvidia-gpu"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":17,"issueCount":95,"starsCount":718,"forksCount":149,"license":"Apache License 2.0","participation":[6,3,4,5,1,2,7,11,7,10,0,3,1,3,0,11,8,4,4,2,4,5,8,9,5,11,4,6,1,0,6,3,7,6,4,8,4,8,7,5,1,4,2,4,14,5,3,10,4,1,3,5],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-03T22:49:06.149Z"}},{"type":"Public","name":"NeMo-Guardrails","owner":"NVIDIA","isFork":false,"description":"NeMo Guardrails is an open-source toolkit for easily adding programmable guardrails to LLM-based conversational systems.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":27,"issueCount":161,"starsCount":3566,"forksCount":306,"license":"Other","participation":[9,8,56,39,8,29,28,27,18,14,17,18,45,68,23,46,37,74,58,52,67,46,42,73,54,29,44,39,20,3,26,35,27,24,48,49,55,39,41,25,58,75,34,1,10,36,3,5,14,3,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-03T19:29:08.292Z"}},{"type":"Public","name":"swift","owner":"NVIDIA","isFork":true,"description":"OpenStack Storage (Swift). 
Mirror of code maintained at opendev.org.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":8,"forksCount":1059,"license":"Apache License 2.0","participation":[6,7,14,14,8,4,13,2,16,13,16,3,4,15,2,11,9,3,9,3,5,7,5,5,3,3,3,4,1,1,8,5,9,7,3,11,3,5,13,5,16,7,1,4,0,4,9,8,10,11,3,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-03T20:01:53.172Z"}},{"type":"Public","name":"warp","owner":"NVIDIA","isFork":false,"description":"A Python framework for high performance GPU simulation and graphics","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":7,"issueCount":48,"starsCount":1748,"forksCount":148,"license":"Other","participation":[24,19,51,33,34,7,46,11,8,10,39,17,32,27,1,15,31,11,15,39,55,38,32,23,12,16,16,30,11,0,9,36,31,22,9,7,18,39,46,47,44,25,4,15,8,41,40,38,27,26,33,28],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-03T16:13:48.828Z"}},{"type":"Public","name":"ACE","owner":"NVIDIA","isFork":false,"description":"NVIDIA ACE samples, workflows, and resources","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":3,"issueCount":0,"starsCount":14,"forksCount":5,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-03T03:07:15.316Z"}},{"type":"Public","name":"Megatron-LM","owner":"NVIDIA","isFork":false,"description":"Ongoing research training transformer models at scale","allTopics":["transformers","model-para","large-language-models"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":129,"issueCount":309,"starsCount":8951,"forksCount":2016,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-02T16:23:03.886Z"}},{"type":"Public","name":"Stable-Diffusion-WebUI-TensorRT","owner":"NVIDIA","isFork":false,"description":"TensorRT Extension for Stable Diffusion Web UI","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":15,"issueCount":139,"starsCount":1800,"forksCount":138,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,8,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-02T13:49:56.975Z"}},{"type":"Public","name":"NeMo-text-processing","owner":"NVIDIA","isFork":false,"description":"NeMo text processing for ASR and TTS","allTopics":["text-normalization","inverse-text-n"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":7,"issueCount":5,"starsCount":227,"forksCount":73,"license":"Apache License 2.0","participation":[4,2,1,4,1,1,0,0,1,1,2,1,2,2,0,0,1,2,1,1,4,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,2,0,1,0,4,1,1,0,0,1,2,4,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-03T21:49:40.151Z"}},{"type":"Public","name":"NeMo-Framework-Launcher","owner":"NVIDIA","isFork":false,"description":"NeMo Megatron launcher and tools","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":13,"issueCount":23,"starsCount":401,"forksCount":118,"license":"Apache License 2.0","participation":[11,5,8,2,7,6,18,6,18,16,1,12,5,17,22,5,1,10,34,11,7,7,1,20,13,17,37,28,14,5,19,24,27,25,10,57,25,35,22,27,42,5,7,14,16,24,21,14,36,50,12,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-03T21:49:31.708Z"}},{"type":"Public","name":"nv-cloud-function-helpers","owner":"NVIDIA","isFork":false,"description":"Functions that simplify common tasks with NVIDIA 
Cloud Functions","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":12,"issueCount":0,"starsCount":8,"forksCount":2,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,6,2,9,5,0,1,0,0,0,0,1,0,0,0,6,0,3,0,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-29T18:47:08.264Z"}},{"type":"Public","name":"TensorRT-Model-Optimizer","owner":"NVIDIA","isFork":false,"description":"TensorRT Model Optimizer is a unified library of state-of-the-art model optimization techniques such as quantization and sparsity. It compresses deep learning models for downstream deployment frameworks like TensorRT-LLM or TensorRT to optimize inference speed on NVIDIA GPUs.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":8,"starsCount":229,"forksCount":13,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-29T07:44:32.173Z"}},{"type":"Public","name":"tao_tensorflow2_backend","owner":"NVIDIA","isFork":false,"description":"TAO Toolkit deep learning networks with TensorFlow 2.x backend","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":0,"starsCount":5,"forksCount":0,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,1,3,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-29T01:23:40.767Z"}},{"type":"Public","name":"Megatron-Energon","owner":"NVIDIA","isFork":false,"description":"Megatron's multi-modal data loader","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":7,"forksCount":0,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-24T19:42:03.102Z"}},{"type":"Public","name":"GenerativeAIExamples","owner":"NVIDIA","isFork":false,"description":"Generative AI reference workflows optimized for accelerated infrastructure and microservice architecture.","allTopics":["microservice","gpu-acceleration","nemo","tensorrt","rag","triton-inference-server","large-language-models","llm","llm-inference","retrieval-augmented-generation"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":9,"issueCount":19,"starsCount":1653,"forksCount":264,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,2,1,1,1,2,2,0,3,1,1,0,4,0,2,3,1,0,0,1,3,1,2,1,0,0,1,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-24T19:21:56.508Z"}},{"type":"Public","name":"ChatRTX","owner":"NVIDIA","isFork":false,"description":"A developer reference project for creating Retrieval Augmented Generation (RAG) chatbots on Windows using TensorRT-LLM","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":7,"issueCount":19,"starsCount":2446,"forksCount":262,"license":"Other","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,2,10,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,1,3,0,0,0,0,0,0,0,0,0,0,0,1,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-24T16:45:47.293Z"}},{"type":"Public","name":"spark-rapids-benchmarks","owner":"NVIDIA","isFork":false,"description":"Spark RAPIDS Benchmarks – benchmark sets and utilities for the RAPIDS Accelerator for Apache 
Spark","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":4,"issueCount":21,"starsCount":31,"forksCount":26,"license":"Apache License 2.0","participation":[0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,1,2,0,1,1,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-03T14:51:29.205Z"}},{"type":"Public","name":"air_sdk","owner":"NVIDIA","isFork":false,"description":"A Python SDK library for interacting with NVIDIA Air","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":0,"starsCount":5,"forksCount":4,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-23T17:24:35.791Z"}},{"type":"Public","name":"numbast","owner":"NVIDIA","isFork":false,"description":"Numbast is a tool to build an automated pipeline that converts CUDA APIs into Numba bindings.","allTopics":["cuda","numba"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":3,"issueCount":18,"starsCount":13,"forksCount":3,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-27T22:34:07.312Z"}},{"type":"Public","name":"cuda-python","owner":"NVIDIA","isFork":false,"description":"CUDA Python Low-level Bindings","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":10,"starsCount":788,"forksCount":60,"license":"Other","participation":[0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-21T16:20:17.052Z"}},{"type":"Public","name":"audio-flamingo","owner":"NVIDIA","isFork":false,"description":"PyTorch implementation of Audio Flamingo: A Novel Audio Language Model with Few-Shot Learning and Dialogue Abilities.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":0,"starsCount":8,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-20T18:40:22.567Z"}},{"type":"Public","name":"NeMo-speech-data-processor","owner":"NVIDIA","isFork":false,"description":"A toolkit for processing speech data and creating speech datasets","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":7,"issueCount":1,"starsCount":63,"forksCount":20,"license":"Apache License 2.0","participation":[3,1,1,1,0,11,7,20,0,0,0,0,0,0,0,0,0,0,0,0,1,1,20,1,2,1,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,5,0,0,1,1,1,2,2,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-03T11:55:08.869Z"}},{"type":"Public","name":"hpc-container-maker","owner":"NVIDIA","isFork":false,"description":"HPC Container Maker","allTopics":["docker","containers","hpc","singularity"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":4,"issueCount":11,"starsCount":439,"forksCount":86,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-17T19:54:46.029Z"}}],"repositoryCount":110,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}