#!/usr/bin/env python3
"""Voice-to-Text Transcription Tool with Speaker Identification"""
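# Example invocations (the script name here is illustrative; requires the
# SpeechRecognition and pydub packages, ffmpeg for non-WAV input, and
# pocketsphinx if the offline 'sphinx' engine is used):
#   python transcribe.py interview.mp3 -o interview -f srt
#   python transcribe.py meeting.wav -e sphinx -f json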
import argparse
import json
import os

import speech_recognition as sr
from pydub import AudioSegment
from pydub.silence import detect_nonsilent

def convert_audio(file_path):
    """Convert the input audio to WAV format if it is not WAV already."""
    base, ext = os.path.splitext(file_path)
    if ext.lower() != '.wav':
        audio = AudioSegment.from_file(file_path)
        # splitext-based rename avoids str.replace matching elsewhere in the path
        wav_path = base + '.wav'
        audio.export(wav_path, format='wav')
        return wav_path
    return file_path

def detect_speakers(audio_path, min_silence=500, silence_thresh=-40):
    """Naive speaker segmentation: split on silence, then cycle speaker labels."""
    audio = AudioSegment.from_wav(audio_path)
    # detect_nonsilent returns [start_ms, end_ms] pairs for the non-silent spans
    chunks = detect_nonsilent(audio, min_silence_len=min_silence, silence_thresh=silence_thresh)
    segments = []
    for i, (start, end) in enumerate(chunks):
        # Placeholder labeling: cycles through three names rather than doing
        # real diarization (voice-based speaker identification).
        segments.append({'speaker': f'Speaker_{(i % 3) + 1}',
                         'start': start / 1000,  # ms -> seconds
                         'end': end / 1000})
    return segments

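# Each segment handed to transcribe_audio has this shape (values illustrative):
#   {'speaker': 'Speaker_1', 'start': 0.0, 'end': 4.2}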
def transcribe_audio(audio_path, segments, engine='google'):
    """Transcribe each detected segment and tag it with its speaker label."""
    recognizer = sr.Recognizer()
    audio = AudioSegment.from_wav(audio_path)
    results = []

    for seg in segments:
        chunk_path = 'temp_chunk.wav'
        try:
            # Slice the segment out of the full recording (pydub indexes in ms)
            chunk = audio[int(seg['start'] * 1000):int(seg['end'] * 1000)]
            chunk.export(chunk_path, format='wav')

            with sr.AudioFile(chunk_path) as source:
                audio_data = recognizer.record(source)
            if engine == 'google':
                text = recognizer.recognize_google(audio_data)
            else:
                text = recognizer.recognize_sphinx(audio_data)
            results.append({'speaker': seg['speaker'], 'start': seg['start'],
                            'end': seg['end'], 'text': text})
        except Exception as e:
            results.append({'speaker': seg['speaker'], 'start': seg['start'],
                            'end': seg['end'], 'text': f'[Error: {e}]'})
        finally:
            # Clean up the temp file even when recognition fails
            if os.path.exists(chunk_path):
                os.remove(chunk_path)
    return results

def format_time(seconds):
    """Format seconds as HH:MM:SS,mmm for SRT timestamps."""
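    # Worked example: format_time(3661.5) -> "01:01:01,500"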
    hrs = int(seconds // 3600)
    mins = int((seconds % 3600) // 60)
    secs = int(seconds % 60)
    millis = int((seconds % 1) * 1000)
    return f"{hrs:02d}:{mins:02d}:{secs:02d},{millis:03d}"

def export_results(results, output_path, format_type='txt'):
    """Export the transcription as plain text, SRT subtitles, or JSON."""
    if format_type == 'txt':
        with open(output_path, 'w', encoding='utf-8') as f:
            for r in results:
                f.write(f"[{r['start']:.2f}s - {r['end']:.2f}s] {r['speaker']}: {r['text']}\n")
    elif format_type == 'srt':
        with open(output_path, 'w', encoding='utf-8') as f:
            for i, r in enumerate(results, 1):
                f.write(f"{i}\n{format_time(r['start'])} --> {format_time(r['end'])}\n"
                        f"{r['speaker']}: {r['text']}\n\n")
    elif format_type == 'json':
        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(results, f, indent=2)

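# An exported SRT entry looks like this (text illustrative):
#   1
#   00:00:00,000 --> 00:00:04,200
#   Speaker_1: hello everyone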
def main():
    parser = argparse.ArgumentParser(description='Voice-to-Text with Speaker ID')
    parser.add_argument('input', help='Input audio file')
    parser.add_argument('-o', '--output', help='Output file', default='transcription')
    parser.add_argument('-f', '--format', choices=['txt', 'srt', 'json'], default='txt')
    parser.add_argument('-e', '--engine', choices=['google', 'sphinx'], default='google')
    args = parser.parse_args()

    print(f"Converting audio: {args.input}")
    wav_path = convert_audio(args.input)

    print("Detecting speakers...")
    segments = detect_speakers(wav_path)
    print(f"Found {len(segments)} segments")

    print("Transcribing audio...")
    results = transcribe_audio(wav_path, segments, args.engine)

    output_file = f"{args.output}.{args.format}"
    export_results(results, output_file, args.format)
    print(f"Transcription saved to {output_file}")

    # Remove the intermediate WAV only if we created it during conversion
    if wav_path != args.input:
        os.remove(wav_path)

if __name__ == '__main__':
    main()