diff --git a/src/backend/marsha/core/api/video.py b/src/backend/marsha/core/api/video.py
index 510088f3cf..08c5f0b34e 100644
--- a/src/backend/marsha/core/api/video.py
+++ b/src/backend/marsha/core/api/video.py
@@ -1226,7 +1226,7 @@ def initiate_transcript(self, request, pk=None):
timed_text_track = TimedTextTrack.objects.create(
video=video,
language=settings.LANGUAGES[0][0],
- mode=TimedTextTrack.TRANSCRIPT,
+ mode=TimedTextTrack.SUBTITLE,
upload_state=defaults.PROCESSING,
)
except IntegrityError:
diff --git a/src/backend/marsha/core/management/commands/delete_transcoding_temp_files.py b/src/backend/marsha/core/management/commands/delete_transcoding_temp_files.py
new file mode 100644
index 0000000000..02a986ff03
--- /dev/null
+++ b/src/backend/marsha/core/management/commands/delete_transcoding_temp_files.py
@@ -0,0 +1,20 @@
+"""Management command to delete transcoding temp files."""
+
+import logging
+
+from django.core.management import BaseCommand
+
+from marsha.core.utils import transcode
+
+
+logger = logging.getLogger(__name__)
+
+
+class Command(BaseCommand):
+ """Delete transcoding temp files."""
+
+ help = "Delete transcoding temp files"
+
+ def handle(self, *args, **options):
+ """Delete all transcoding temp files."""
+ transcode.delete_transcoding_temp_files()
diff --git a/src/backend/marsha/core/tests/api/video/test_initiate_transcript.py b/src/backend/marsha/core/tests/api/video/test_initiate_transcript.py
index 7dc33d8556..8ebecd7fce 100644
--- a/src/backend/marsha/core/tests/api/video/test_initiate_transcript.py
+++ b/src/backend/marsha/core/tests/api/video/test_initiate_transcript.py
@@ -83,7 +83,7 @@ def test_api_video_initiate_transcript_token_user(self):
mock_dispatch_video.assert_called_once_with(video)
self.assertEqual(timed_text_track.language, "en")
- self.assertEqual(timed_text_track.mode, models.TimedTextTrack.TRANSCRIPT)
+ self.assertEqual(timed_text_track.mode, models.TimedTextTrack.SUBTITLE)
self.assertEqual(timed_text_track.upload_state, defaults.PROCESSING)
def test_api_video_initiate_transcript_staff_or_user(self):
@@ -136,7 +136,7 @@ def test_api_video_initiate_transcript_existing_timed_text_track(self):
factories.TimedTextTrackFactory(
video=video,
language="en",
- mode=models.TimedTextTrack.TRANSCRIPT,
+ mode=models.TimedTextTrack.SUBTITLE,
upload_state=defaults.READY,
)
jwt_token = InstructorOrAdminLtiTokenFactory(playlist=video.playlist)
@@ -204,5 +204,5 @@ def test_api_video_initiate_transcript_aws_pipeline(self):
mock_dispatch_video.assert_called_once_with(video)
self.assertEqual(timed_text_track.language, "en")
- self.assertEqual(timed_text_track.mode, models.TimedTextTrack.TRANSCRIPT)
+ self.assertEqual(timed_text_track.mode, models.TimedTextTrack.SUBTITLE)
self.assertEqual(timed_text_track.upload_state, defaults.PROCESSING)
diff --git a/src/backend/marsha/core/tests/management_commands/test_delete_transcoding_temp_files.py b/src/backend/marsha/core/tests/management_commands/test_delete_transcoding_temp_files.py
new file mode 100644
index 0000000000..f90b7faf96
--- /dev/null
+++ b/src/backend/marsha/core/tests/management_commands/test_delete_transcoding_temp_files.py
@@ -0,0 +1,23 @@
+"""Test transcript_video command."""
+
+from unittest.mock import patch
+
+from django.core.management import call_command
+from django.test import TestCase
+
+from marsha.core.utils import transcode
+
+
+@patch.object(transcode, "delete_transcoding_temp_files")
+class DeleteTranscodingTempFilesCommandTestCase(TestCase):
+ """
+ Test case for the delete_transcoding_temp_files command.
+ """
+
+ maxDiff = None
+
+ def test_delete_transcoding_temp_files(self, mock_delete_temp_files):
+ """Should call delete_transcoding_temp_files function."""
+ call_command("delete_transcoding_temp_files")
+
+ mock_delete_temp_files.assert_called_once()
diff --git a/src/backend/marsha/core/tests/utils/test_transcode.py b/src/backend/marsha/core/tests/utils/test_transcode.py
index bee368f51b..3bcee1ea8d 100644
--- a/src/backend/marsha/core/tests/utils/test_transcode.py
+++ b/src/backend/marsha/core/tests/utils/test_transcode.py
@@ -1,7 +1,13 @@
"""Tests for the `core.utils.transcode` module."""
+from unittest.mock import patch
+from uuid import uuid4
+
from django.test import TestCase
+from django_peertube_runner_connector.factories import (
+ VideoFactory as TranscodedVideoFactory,
+)
from django_peertube_runner_connector.models import (
Video as TranscodedVideo,
VideoFile,
@@ -12,11 +18,14 @@
from marsha.core import defaults
from marsha.core.factories import VideoFactory
from marsha.core.utils.time_utils import to_datetime
-from marsha.core.utils.transcode import transcoding_ended_callback
+from marsha.core.utils.transcode import (
+ delete_transcoding_temp_files,
+ transcoding_ended_callback,
+)
-class TranscodeTestCase(TestCase):
- """Test the `transcode` functions."""
+class TranscodingEndedCallbackTestCase(TestCase):
+ """Test the `transcoding_ended_callback` function."""
def setUp(self):
# Create a test video
@@ -31,6 +40,7 @@ def setUp(self):
state=VideoState.PUBLISHED,
directory=f"vod/{self.video.pk}/video/1698941501",
)
+
self.video_playlist = VideoStreamingPlaylist.objects.create(
video=self.transcoded_video
)
@@ -50,31 +60,59 @@ def setUp(self):
extname="mp4",
)
- def test_transcoding_ended_callback(self):
+ @patch("marsha.core.utils.transcode.delete_temp_file")
+ def test_transcoding_ended_callback(self, mock_delete_temp_file):
"""The marsha video should correctly be updated."""
transcoding_ended_callback(self.transcoded_video)
self.video.refresh_from_db()
-
self.assertEqual(self.video.uploaded_on, to_datetime("1698941501"))
-
self.assertEqual(self.video.resolutions, [720, 1080])
-
self.assertEqual(self.video.upload_state, defaults.READY)
self.assertEqual(self.video.transcode_pipeline, defaults.PEERTUBE_PIPELINE)
+ mock_delete_temp_file.assert_called_once_with(
+ self.transcoded_video, f"tmp/{self.video.pk}/video/1698941501"
+ )
- def test_transcoding_ended_callback_with_error(self):
+ @patch("marsha.core.utils.transcode.delete_temp_file")
+ def test_transcoding_ended_callback_with_error(self, mock_delete_temp_file):
"""The marsha video should be set with state on error and nothing else should be done."""
self.transcoded_video.state = VideoState.TRANSCODING_FAILED
transcoding_ended_callback(self.transcoded_video)
self.video.refresh_from_db()
-
self.assertEqual(self.video.upload_state, defaults.ERROR)
-
self.assertEqual(self.video.uploaded_on, None)
-
self.assertEqual(self.video.resolutions, [])
-
self.assertEqual(self.video.transcode_pipeline, None)
+ mock_delete_temp_file.assert_called_once_with(
+ self.transcoded_video, f"tmp/{self.video.pk}/video/1698941501"
+ )
+
+
+class DeleteTranscodingTempFilesTestCase(TestCase):
+ """Test the `delete_transcoding_temp_files` function."""
+
+ @patch("marsha.core.utils.transcode.delete_temp_file")
+ def test_delete_transcoding_temp_files(self, mock_delete_temp_file):
+ """The temp files should be deleted."""
+ video_id = uuid4()
+ transcoded_video = TranscodedVideoFactory(
+ directory=f"vod/{video_id}/video/1698941501"
+ )
+
+ delete_transcoding_temp_files()
+
+ mock_delete_temp_file.assert_called_once_with(
+ video_file, f"tmp/{video_id}/video/1698941501"
+ )
+
+ @patch("marsha.core.utils.transcode.delete_temp_file")
+ def test_delete_transcoding_temp_files_all(self, mock_delete_temp_file):
+ """All video files should be processed."""
+ TranscodedVideoFactory.create_batch(15)
+
+ delete_transcoding_temp_files()
+
+ self.assertEqual(mock_delete_temp_file.call_count, 15)
diff --git a/src/backend/marsha/core/tests/utils/test_transcript.py b/src/backend/marsha/core/tests/utils/test_transcript.py
index 653e305b3d..9453daf68d 100644
--- a/src/backend/marsha/core/tests/utils/test_transcript.py
+++ b/src/backend/marsha/core/tests/utils/test_transcript.py
@@ -56,10 +56,13 @@ def test_transcription_ended_callback(self):
timed_text_track = video.timedtexttracks.get()
self.assertEqual(timed_text_track.language, language)
- self.assertEqual(timed_text_track.mode, TimedTextTrack.TRANSCRIPT)
+ self.assertEqual(timed_text_track.mode, TimedTextTrack.SUBTITLE)
self.assertEqual(timed_text_track.upload_state, defaults.READY)
self.assertEqual(timed_text_track.extension, "vtt")
+ video.refresh_from_db()
+ self.assertTrue(video.should_use_subtitle_as_transcript)
+
ttt_path = timed_text_track.get_videos_storage_prefix()
self.assertTrue(
video_storage.exists(f"{ttt_path}/source.{timed_text_track.extension}")
@@ -114,15 +117,15 @@ def test_transcript_video_no_video(self, mock_launch_video_transcript):
mock_launch_video_transcript.delay.assert_not_called()
@patch.object(transcript_utils, "launch_video_transcript")
- def test_transcript_video_already_transcript(self, mock_launch_video_transcript):
+ def test_transcript_video_already_subtitle(self, mock_launch_video_transcript):
"""
Should not call the launch_video_transcript function
- if the video already has a transcript.
+ if the video already has a subtitle.
"""
timed_text_track = TimedTextTrackFactory(
video=VideoFactory(upload_state=defaults.READY),
language=settings.LANGUAGES[0][0],
- mode=TimedTextTrack.TRANSCRIPT,
+ mode=TimedTextTrack.SUBTITLE,
)
with self.assertRaises(transcript_utils.TranscriptError) as context:
@@ -130,15 +133,15 @@ def test_transcript_video_already_transcript(self, mock_launch_video_transcript)
self.assertEqual(
str(context.exception),
- f"A transcript already exists for video {timed_text_track.video.id}",
+ f"A subtitle already exists for video {timed_text_track.video.id}",
)
mock_launch_video_transcript.delay.assert_not_called()
@patch.object(transcript_utils, "launch_video_transcript")
- def test_transcript_video_already_subtitle(self, mock_launch_video_transcript):
+ def test_transcript_video_already_transcript(self, mock_launch_video_transcript):
"""
Should call the launch_video_transcript function
- if the video has a subtitle.
+ if the video has a transcript.
"""
timed_text_track = TimedTextTrackFactory(
video=VideoFactory(
@@ -146,7 +149,7 @@ def test_transcript_video_already_subtitle(self, mock_launch_video_transcript):
transcode_pipeline=defaults.PEERTUBE_PIPELINE,
),
language=settings.LANGUAGES[0][0],
- mode=TimedTextTrack.SUBTITLE,
+ mode=TimedTextTrack.TRANSCRIPT,
)
transcript_utils.transcript(timed_text_track.video)
@@ -159,7 +162,7 @@ def test_transcript_video_already_subtitle(self, mock_launch_video_transcript):
self.assertEqual(timed_text_track.video.timedtexttracks.count(), 2)
self.assertTrue(
timed_text_track.video.timedtexttracks.filter(
- mode=TimedTextTrack.TRANSCRIPT
+ mode=TimedTextTrack.SUBTITLE
).exists()
)
@@ -190,7 +193,7 @@ def test_transcript_video_already_closed_caption(
self.assertEqual(timed_text_track.video.timedtexttracks.count(), 2)
self.assertTrue(
timed_text_track.video.timedtexttracks.filter(
- mode=TimedTextTrack.TRANSCRIPT
+ mode=TimedTextTrack.SUBTITLE
).exists()
)
diff --git a/src/backend/marsha/core/utils/transcode.py b/src/backend/marsha/core/utils/transcode.py
index dc1e916c23..530d127bbf 100644
--- a/src/backend/marsha/core/utils/transcode.py
+++ b/src/backend/marsha/core/utils/transcode.py
@@ -1,15 +1,22 @@
""" Utils related to transcoding """
from django_peertube_runner_connector.models import Video as TranscodedVideo, VideoState
-
-from marsha.core.defaults import ERROR, PEERTUBE_PIPELINE, READY
+from django_peertube_runner_connector.utils.files import delete_temp_file
+
+from marsha.core.defaults import (
+ ERROR,
+ PEERTUBE_PIPELINE,
+ READY,
+ TMP_VIDEOS_STORAGE_BASE_DIRECTORY,
+ VOD_VIDEOS_STORAGE_BASE_DIRECTORY,
+)
from marsha.core.models.video import Video
from marsha.core.utils.time_utils import to_datetime
def transcoding_ended_callback(transcoded_video: TranscodedVideo):
"""
- Callback used when the a Peertube runnner has finished
+ Callback used when a Peertube runner has finished
to transcode a video.
Parameters
@@ -23,6 +30,11 @@ def transcoding_ended_callback(transcoded_video: TranscodedVideo):
uploaded_on = directory[-1]
video_id = directory[-3]
video = Video.objects.get(pk=video_id)
+ tmp_filename = transcoded_video.directory.replace(
+ VOD_VIDEOS_STORAGE_BASE_DIRECTORY, TMP_VIDEOS_STORAGE_BASE_DIRECTORY
+ )
+
+ delete_temp_file(transcoded_video, tmp_filename)
if transcoded_video.state == VideoState.TRANSCODING_FAILED:
video.update_upload_state(ERROR, None)
@@ -39,3 +51,12 @@ def transcoding_ended_callback(transcoded_video: TranscodedVideo):
to_datetime(uploaded_on),
**{"resolutions": resolutions},
)
+
+
+def delete_transcoding_temp_files():
+ """Delete all transcoding temp files."""
+ for transcoded_video in TranscodedVideo.objects.all():
+ tmp_filename = transcoded_video.directory.replace(
+ VOD_VIDEOS_STORAGE_BASE_DIRECTORY, TMP_VIDEOS_STORAGE_BASE_DIRECTORY
+ )
+ delete_temp_file(transcoded_video, tmp_filename)
diff --git a/src/backend/marsha/core/utils/transcript_utils.py b/src/backend/marsha/core/utils/transcript_utils.py
index 2312d07267..f33809d74e 100644
--- a/src/backend/marsha/core/utils/transcript_utils.py
+++ b/src/backend/marsha/core/utils/transcript_utils.py
@@ -28,13 +28,11 @@ def transcript(video):
TimedTextTrack.objects.create(
video=video,
language=settings.LANGUAGES[0][0],
- mode=TimedTextTrack.TRANSCRIPT,
+ mode=TimedTextTrack.SUBTITLE,
upload_state=defaults.PROCESSING,
)
except IntegrityError as e:
- raise TranscriptError(
- f"A transcript already exists for video {video.id}"
- ) from e
+ raise TranscriptError(f"A subtitle already exists for video {video.id}") from e
domain = (
settings.TRANSCODING_CALLBACK_DOMAIN
@@ -78,7 +76,7 @@ def transcription_ended_callback(
timed_text_track, created = video.timedtexttracks.get_or_create(
upload_state=defaults.PROCESSING,
- mode=TimedTextTrack.TRANSCRIPT,
+ mode=TimedTextTrack.SUBTITLE,
defaults={
"language": language,
"extension": "vtt",
@@ -86,6 +84,8 @@ def transcription_ended_callback(
"upload_state": defaults.READY,
},
)
+ video.should_use_subtitle_as_transcript = True
+ video.save()
if not created:
timed_text_track.upload_state = defaults.READY
timed_text_track.uploaded_on = to_datetime(uploaded_on)
diff --git a/src/backend/setup.cfg b/src/backend/setup.cfg
index a4eba094da..ece62374f8 100644
--- a/src/backend/setup.cfg
+++ b/src/backend/setup.cfg
@@ -45,7 +45,7 @@ install_requires =
django-redis==5.4.0
django-safedelete==1.4.0
django-storages==1.14.3
- django-peertube-runner-connector==0.9.0
+ django-peertube-runner-connector==0.10.0
django-waffle==4.1.0
Django<5
djangorestframework==3.15.2
diff --git a/src/frontend/packages/lib_video/src/components/common/VideoWidgetProvider/LocalizedTimedTextTrackUpload/TimedTextTrackItem/index.spec.tsx b/src/frontend/packages/lib_video/src/components/common/VideoWidgetProvider/LocalizedTimedTextTrackUpload/TimedTextTrackItem/index.spec.tsx
index 072523e299..5a2d26b59e 100644
--- a/src/frontend/packages/lib_video/src/components/common/VideoWidgetProvider/LocalizedTimedTextTrackUpload/TimedTextTrackItem/index.spec.tsx
+++ b/src/frontend/packages/lib_video/src/components/common/VideoWidgetProvider/LocalizedTimedTextTrackUpload/TimedTextTrackItem/index.spec.tsx
@@ -113,10 +113,11 @@ describe('<TimedTextTrackItem />', () => {
).not.toBeInTheDocument();
});
- it('renders the component for a processing timed text track', async () => {
+ it('renders the component for a processing transcript', async () => {
const mockedTimedTextTrack = timedTextMockFactory({
language: 'fr-FR',
upload_state: uploadState.PROCESSING,
+ mode: timedTextMode.TRANSCRIPT,
});
const mockedUploadingObject: UploadingObject = {
@@ -159,11 +160,58 @@ describe('<TimedTextTrackItem />', () => {
).not.toBeInTheDocument();
});
- it('renders the component for a processing transcript', async () => {
+ it('renders the component for a processing closed caption', async () => {
const mockedTimedTextTrack = timedTextMockFactory({
language: 'fr-FR',
upload_state: uploadState.PROCESSING,
- mode: timedTextMode.TRANSCRIPT,
+ mode: timedTextMode.CLOSED_CAPTIONING,
+ });
+
+ const mockedUploadingObject: UploadingObject = {
+ file: new File([], 'subtitle.srt'),
+ objectType: modelName.TIMEDTEXTTRACKS,
+ objectId: mockedTimedTextTrack.id,
+ progress: 100,
+ status: UploadManagerStatus.SUCCESS,
+ };
+
+    render(
+      <UploadManagerContext.Provider
+        value={{
+          setUploadState: () => {},
+          uploadManagerState: {
+            [mockedTimedTextTrack.id]: mockedUploadingObject,
+          },
+        }}
+      >
+        <InfoWidgetModalProvider value={null}>
+          <TimedTextTrackItem
+            onRetryFailedUpload={jest.fn()}
+            timedTextTrack={mockedTimedTextTrack}
+          />
+        </InfoWidgetModalProvider>
+      </UploadManagerContext.Provider>,
+    );
+
+ await screen.findByText('French');
+ screen.getByRole('button', {
+ name: 'Click on this button to delete the timed text track.',
+ });
+ screen.getByText('Processing');
+ expect(
+ screen.queryByRole('button', {
+ name: 'Click on this button to retry uploading your failed upload.',
+ }),
+ ).not.toBeInTheDocument();
+ });
+
+ it('renders the component for a processing subtitle', async () => {
+ const mockedTimedTextTrack = timedTextMockFactory({
+ language: 'fr-FR',
+ upload_state: uploadState.PROCESSING,
+ mode: timedTextMode.SUBTITLE,
});
const mockedUploadingObject: UploadingObject = {
diff --git a/src/frontend/packages/lib_video/src/components/common/VideoWidgetProvider/LocalizedTimedTextTrackUpload/TimedTextTrackItem/index.tsx b/src/frontend/packages/lib_video/src/components/common/VideoWidgetProvider/LocalizedTimedTextTrackUpload/TimedTextTrackItem/index.tsx
index 063e16f509..ae3d89a38a 100644
--- a/src/frontend/packages/lib_video/src/components/common/VideoWidgetProvider/LocalizedTimedTextTrackUpload/TimedTextTrackItem/index.tsx
+++ b/src/frontend/packages/lib_video/src/components/common/VideoWidgetProvider/LocalizedTimedTextTrackUpload/TimedTextTrackItem/index.tsx
@@ -70,7 +70,7 @@ export const TimedTextTrackItem = ({
pad={{ horizontal: 'small', vertical: 'small' }}
>
{(timedTextTrack.upload_state !== uploadState.PROCESSING ||
- timedTextTrack.mode !== timedTextMode.TRANSCRIPT) && (
+ timedTextTrack.mode !== timedTextMode.SUBTITLE) && (
diff --git a/src/frontend/packages/lib_video/src/components/common/VideoWidgetProvider/LocalizedTimedTextTrackUpload/index.spec.tsx b/src/frontend/packages/lib_video/src/components/common/VideoWidgetProvider/LocalizedTimedTextTrackUpload/index.spec.tsx
--- a/src/frontend/packages/lib_video/src/components/common/VideoWidgetProvider/LocalizedTimedTextTrackUpload/index.spec.tsx
+++ b/src/frontend/packages/lib_video/src/components/common/VideoWidgetProvider/LocalizedTimedTextTrackUpload/index.spec.tsx
@@ ... @@ describe('<LocalizedTimedTextTrackUpload />', () => {
await screen.findByText('French');
screen.getByText('Uploading');
- screen.getByText('English');
screen.getByText('Processing');
screen.getByText('Spanish');
screen.getByText('Uploading');
@@ -307,10 +307,10 @@ describe('<LocalizedTimedTextTrackUpload />', () => {
screen.getAllByRole('button', {
name: 'Click on this button to delete the timed text track.',
}),
- ).toHaveLength(3);
+ ).toHaveLength(2);
});
- it('renders a generate transcription button in transcript mode if none exists', async () => {
+ it('renders a generate transcription button in subtitle mode if none exists', async () => {
const mockedVideo = videoMockFactory({
id: '1234',
});
@@ -338,7 +338,7 @@ describe('<LocalizedTimedTextTrackUpload />', () => {
     render(
       wrapInVideo(
         <LocalizedTimedTextTrackUpload
-          timedTextModeWidget={timedTextMode.TRANSCRIPT}
+          timedTextModeWidget={timedTextMode.SUBTITLE}
         />,
         mockedVideo,
       ),
@@ -351,7 +351,7 @@ describe('<LocalizedTimedTextTrackUpload />', () => {
await userEvent.click(generateTranscriptButton);
screen.getByText(
- "By clicking this button, a transcript will be automatically generated. The transcript's language will be the one detected in the video.",
+ "By clicking this button, a transcript will be automatically generated and used as a subtitle. The transcript's language will be the one detected in the video.",
);
expect(fetchMock.calls()).toHaveLength(2);
@@ -360,7 +360,7 @@ describe('<LocalizedTimedTextTrackUpload />', () => {
);
});
- it('does not render a generate transcription button in transcript mode if one exists', () => {
+ it('does not render a generate transcription button in subtitle mode if one exists', () => {
const mockedVideo = videoMockFactory({
id: '1234',
});
@@ -383,7 +383,7 @@ describe('<LocalizedTimedTextTrackUpload />', () => {
const mockedTimedTextTrackCompleted = timedTextMockFactory({
language: 'es-ES',
- mode: timedTextMode.TRANSCRIPT,
+ mode: timedTextMode.SUBTITLE,
});
useTimedTextTrack.getState().addResource(mockedTimedTextTrackCompleted);
@@ -405,7 +405,7 @@ describe('<LocalizedTimedTextTrackUpload />', () => {
).not.toBeInTheDocument();
});
- it('does not render a generate transcription button in subtitle mode', () => {
+ it('does not render a generate transcription button in transcript mode', () => {
const mockedVideo = videoMockFactory({
id: '1234',
});
@@ -429,7 +429,7 @@ describe('<LocalizedTimedTextTrackUpload />', () => {
     render(
       wrapInVideo(
         <LocalizedTimedTextTrackUpload
-          timedTextModeWidget={timedTextMode.SUBTITLE}
+          timedTextModeWidget={timedTextMode.TRANSCRIPT}
        />,
         mockedVideo,
       ),
@@ -478,7 +478,7 @@ describe('<LocalizedTimedTextTrackUpload />', () => {
).not.toBeInTheDocument();
});
- it('does not render a generate transcription button in transcript mode if flag disabled', () => {
+ it('does not render a generate transcription button in subtitle mode if flag disabled', () => {
const mockedVideo = videoMockFactory({
id: '1234',
});
@@ -506,7 +506,7 @@ describe('<LocalizedTimedTextTrackUpload />', () => {
     render(
       wrapInVideo(
         <LocalizedTimedTextTrackUpload
-          timedTextModeWidget={timedTextMode.TRANSCRIPT}
+          timedTextModeWidget={timedTextMode.SUBTITLE}
         />,
         mockedVideo,
       ),
diff --git a/src/frontend/packages/lib_video/src/components/common/VideoWidgetProvider/LocalizedTimedTextTrackUpload/index.tsx b/src/frontend/packages/lib_video/src/components/common/VideoWidgetProvider/LocalizedTimedTextTrackUpload/index.tsx
index 2cf8d6ce17..53ee23f93e 100644
--- a/src/frontend/packages/lib_video/src/components/common/VideoWidgetProvider/LocalizedTimedTextTrackUpload/index.tsx
+++ b/src/frontend/packages/lib_video/src/components/common/VideoWidgetProvider/LocalizedTimedTextTrackUpload/index.tsx
@@ -65,7 +65,7 @@ const messages = defineMessages({
},
transcriptMessage: {
defaultMessage:
- "By clicking this button, a transcript will be automatically generated. The transcript's language will be the one detected in the video.",
+ "By clicking this button, a transcript will be automatically generated and used as a subtitle. The transcript's language will be the one detected in the video.",
description: 'Message displayed for explaining the transcript generation.',
id: 'components.UploadWidgetGeneric.transcriptMessage',
},
@@ -251,7 +251,7 @@ export const LocalizedTimedTextTrackUpload = ({
{isFlagEnabled(flags.TRANSCRIPTION) &&
video.upload_state === uploadState.READY &&
- timedTextModeWidget === timedTextMode.TRANSCRIPT &&
+ timedTextModeWidget === timedTextMode.SUBTITLE &&
filteredTimedTextTracks.length === 0 && (