Hi,
I've just finished implementing Voicegram / Multiple-Location Audio Recording in HTML5 (see the attached "voicegram.html"), using the W3C MediaStream Recording (MediaRecorder) and Web Storage APIs. The system works smoothly, allowing efficient audio recording and storage across multiple locations.
I was thinking that this feature might be a good addition to Nikita's audio archive as well. It could potentially enhance our current setup and provide more flexibility in managing audio files from different spots.
What do you reckon? Let me know if you'd be interested in discussing this further or seeing a demo.
Below is the HTML5 source code of the implementation:
-- <!DOCTYPE html>
<html lang="no">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>Gingerblue MLAR</title>
  <!-- FIX: the src URL was wrapped in literal angle brackets ("<https://...>"),
       a Markdown autolink artifact, so jQuery never loaded and none of the
       click handlers below were ever bound. -->
  <script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>
  <style>
    body { font-family: Arial, sans-serif; text-align: center; background-color: #f4f4f4; }
    h1 { color: #333; }
    button { padding: 10px 20px; margin: 10px; border: none; background-color: #007bff; color: white; cursor: pointer; }
    button:disabled { background-color: #ccc; }
    #recordingsList { list-style: none; padding: 0; }
    #recordingsList li { background: white; margin: 10px; padding: 10px; border-radius: 5px; box-shadow: 0 0 5px rgba(0, 0, 0, 0.1); }
  </style>
</head>
<body>
  <h1>Gingerblue Multiple-Location Audio Recording</h1>
  <button id="startRecording">Start opptak</button>
  <button id="stopRecording" disabled>Stopp opptak</button>
  <ul id="recordingsList"></ul>
<script>
// Shared recorder state for the handlers below.
// mediaRecorder: the active MediaRecorder (undefined until startRecording runs).
// audioChunks: Blob chunks accumulated via ondataavailable for the current take.
let mediaRecorder; let audioChunks = [];
/**
 * Requests microphone access and starts a new recording.
 * On success: resets the chunk buffer, wires ondataavailable/onstop, and starts
 * the MediaRecorder. On failure (permission denied, no device): logs the error
 * and restores the button states instead of leaving an unhandled rejection
 * and a stuck UI.
 */
async function startRecording() {
  try {
    const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
    audioChunks = []; // drop any chunks left over from a previous take
    mediaRecorder = new MediaRecorder(stream);
    mediaRecorder.ondataavailable = (event) => {
      audioChunks.push(event.data);
    };
    mediaRecorder.onstop = () => {
      // FIX: release the microphone tracks so the browser's "recording"
      // indicator turns off once the take is stopped.
      stream.getTracks().forEach((track) => track.stop());
      saveRecording();
    };
    mediaRecorder.start();
  } catch (err) {
    console.error('Kunne ikke starte opptak:', err);
    // Restore the buttons so the user can try again.
    $('#startRecording').prop('disabled', false);
    $('#stopRecording').prop('disabled', true);
  }
}
/**
 * Stops the current recording, triggering the recorder's onstop handler.
 * FIX: MediaRecorder.stop() throws InvalidStateError when the recorder is
 * missing or already inactive (Stop clicked twice, or Start never succeeded),
 * so guard on existence and state before calling it.
 */
function stopRecording() {
  if (mediaRecorder && mediaRecorder.state !== 'inactive') {
    mediaRecorder.stop();
  }
}
/**
 * Packages the accumulated chunks into a Blob, serializes it as a base64
 * <voicegram> XML fragment persisted via saveVoicegram, and appends an inline
 * <audio> player for the take to #recordingsList.
 * FIX: the chunks MediaRecorder emits are webm/ogg (per its mimeType), not
 * WAV — labelling the Blob 'audio/wav' produced files that players reject.
 * Use the recorder's actual mimeType, falling back to 'audio/webm'.
 */
function saveRecording() {
  const mimeType = (mediaRecorder && mediaRecorder.mimeType) || 'audio/webm';
  const audioBlob = new Blob(audioChunks, { type: mimeType });

  // Encode the take as base64 and persist it (FileReader.onload is async).
  const reader = new FileReader();
  reader.onload = function() {
    const base64Audio = reader.result.split(',')[1];
    const voicegramXML = ` <voicegram> <location>${getLocation()}</location> <timestamp>${new Date().toISOString()}</timestamp> <audio encoding="base64">${base64Audio}</audio> </voicegram> `;
    saveVoicegram(voicegramXML);
  };
  reader.readAsDataURL(audioBlob);

  // Add an inline player for the new take.
  const url = URL.createObjectURL(audioBlob);
  const audio = document.createElement('audio');
  audio.controls = true;
  audio.src = url;
  const li = document.createElement('li');
  li.appendChild(audio);
  document.getElementById('recordingsList').appendChild(li);

  audioChunks = []; // safe: the Blob above already holds the data
}
/**
 * Returns the human-readable recording location.
 * Generalized: callers may now pass an explicit location; the previously
 * hard-coded 'Oslo, Norway' is kept as the default for backward compatibility.
 * TODO(review): consider wiring this to the Geolocation API for true
 * multiple-location support.
 * @param {string} [location='Oslo, Norway'] - location label to record.
 * @returns {string} the location string embedded in the voicegram XML.
 */
function getLocation(location = 'Oslo, Norway') {
  return location;
}
/**
 * Appends a voicegram XML string to the 'voicegrams' list in localStorage.
 * Robustness fixes:
 *  - a corrupted/non-array stored value no longer throws in JSON.parse and
 *    lose the take; the list is reset instead;
 *  - setItem can throw QuotaExceededError (base64 audio is large and the
 *    localStorage quota is typically ~5 MB) — that is logged, not fatal.
 * @param {string} voicegramXML - serialized <voicegram> fragment to persist.
 */
function saveVoicegram(voicegramXML) {
  let storedVoicegrams;
  try {
    storedVoicegrams = JSON.parse(localStorage.getItem('voicegrams')) || [];
  } catch (err) {
    console.error('Ugyldig voicegram-liste i localStorage, starter på nytt:', err);
    storedVoicegrams = [];
  }
  if (!Array.isArray(storedVoicegrams)) {
    storedVoicegrams = [];
  }
  storedVoicegrams.push(voicegramXML);
  try {
    localStorage.setItem('voicegrams', JSON.stringify(storedVoicegrams));
    console.log('Voicegram lagret lokalt:', storedVoicegrams);
  } catch (err) {
    console.error('Kunne ikke lagre voicegram (kvote overskredet?):', err);
  }
}
// Start button: kick off a recording and flip the two buttons' enabled states.
$('#startRecording').on('click', () => {
  startRecording();
  $('#startRecording').prop('disabled', true);
  $('#stopRecording').prop('disabled', false);
});
$('#stopRecording').click(function() { stopRecording(); $('#startRecording').prop('disabled', false); $('#stopRecording').prop('disabled', true); }); </script> </body> </html> --
Best, Ole Aamot