纯前端实现画面加音频的视频合成效果实例页面

回到相关文章 »

效果:

画面资源

字幕:[{
    "text": "前面的斗帝",
    "startTime": 0,
    "endTime": 1.5
}, {
    "text": "你敢不敢下马和我一战",
    "startTime": 1.7,
    "endTime": 5.421
}]

配音素材

//zxx: 视频时长以这个为准

背景音乐(可选)

代码:

JS代码:
import { clipAudio, mergeAudio} from './audioBuffer.js'
// Module-level handle to the user-selected background-music File.
// Stays null when no file is chosen; read later during composition.
let bgAudioFile = null;

// Pull the pieces we use out of the Mediabunny global
// (presumably loaded via a <script> tag elsewhere on the page — confirm).
const {
    Input,
    Output,
    Conversion,
    ALL_FORMATS,
    AudioBufferSource,
    Mp4OutputFormat,
    BufferTarget,
    CanvasSource,
    QUALITY_HIGH 
} = Mediabunny;

// Offscreen canvas every video frame is drawn onto (1080x740 output resolution).
const canvas = new OffscreenCanvas(1080, 740);
const context = canvas.getContext('2d');
// Subtitle text style: 64px system font, centered horizontally and vertically.
context.font = '64px system-ui';
context.textAlign = 'center';
context.textBaseline = 'middle';

// Background-music selection & preview: remember the chosen file and
// render an <audio> player for it.
fileAudio.addEventListener('change', ({ target }) => {
    const [selected] = target.files;
    if (!selected) {
        return;
    }
    bgAudioFile = selected;
    audioPreview.innerHTML = `<audio src="${URL.createObjectURL(selected)}" controls></audio>`;
});

// Subtitle track: each cue carries its display text plus a
// [startTime, endTime) window in seconds.
const jsonCaption = [
    { text: "前面的斗帝", startTime: 0, endTime: 1.5 },
    { text: "你敢不敢下马和我一战", startTime: 1.8, endTime: 5.4 },
];
/**
 * Render one video frame onto the canvas: the source picture plus the
 * subtitle that is active at that moment, centered on the canvas.
 *
 * @param {number} frame - zero-based frame index
 * @param {number} [fps=30] - frames per second used to map the frame index
 *   to a time in seconds (was hard-coded to 30)
 */
const draw = function (frame, fps = 30) {
    const time = frame / fps;
    context.clearRect(0, 0, canvas.width, canvas.height);
    // NOTE(review): originSource is not defined in this file — presumably an
    // <img>/<video> element created elsewhere on the page; confirm.
    context.drawImage(originSource, 0, 0, canvas.width, canvas.height);
    // At most one caption is active at a time, so locate it with find()
    // instead of hiding the fillText side effect inside a some() predicate.
    const caption = jsonCaption.find(
        (item) => time >= item.startTime && time < item.endTime
    );
    if (caption) {
        context.fillText(caption.text, canvas.width / 2, canvas.height / 2);
    }
};

// Paint the first frame immediately so the preview is not blank.
draw(0);

// Compose the final video on form submit: draw every frame onto the canvas,
// add the narration (and optional background music), and mux into an MP4.
form.addEventListener('submit', async (event) => {
    event.preventDefault();

    const submit = form.querySelector('button');
    submit.textContent = '合成中...';
    submit.disabled = true;

    // One shared AudioContext for all decodes — the original created a new
    // context per decodeAudioData call and never closed them (resource leak).
    const audioContext = new AudioContext();

    try {
        // Composition output: MP4 container assembled in memory.
        const output = new Output({
            format: new Mp4OutputFormat(),
            target: new BufferTarget(),
        });

        // Video track driven by the offscreen canvas.
        const videoSource = new CanvasSource(canvas, {
            codec: 'avc',
            bitrate: QUALITY_HIGH,
        });
        output.addVideoTrack(videoSource);

        // Audio track driven by AudioBuffers.
        const audioSource = new AudioBufferSource({
            codec: 'aac',
            bitrate: QUALITY_HIGH,
        });
        output.addAudioTrack(audioSource);

        await output.start();

        // Total video length follows the narration audio element.
        // NOTE(review): audioFile.duration is NaN until the element's metadata
        // has loaded — confirm the page guarantees that before submit.
        const duration = audioFile.duration;

        // Render at 30 frames per second.
        for (let frame = 0; frame < 30 * duration; frame++) {
            draw(frame);
            await videoSource.add(frame / 30, 1 / 30);
        }

        // Narration audio first.
        const response = await fetch(audioFile.src);
        const arrayBuffer = await response.arrayBuffer();
        const audioBuffer1 = await audioContext.decodeAudioData(arrayBuffer);

        // Optional background music, mixed under the narration.
        if (bgAudioFile) {
            const arrayBuffer2 = await bgAudioFile.arrayBuffer();
            const audioBuffer2 = await audioContext.decodeAudioData(arrayBuffer2);
            // Trim the music to the video duration before mixing.
            const clippedAudioBuffer = clipAudio(audioBuffer2, [0, duration]);
            await audioSource.add(mergeAudio([audioBuffer1, clippedAudioBuffer]));
        } else {
            await audioSource.add(audioBuffer1);
        }

        await output.finalize();

        const buffer = output.target.buffer;
        // Tag the Blob as MP4 so the object URL carries the right MIME type.
        const url = URL.createObjectURL(new Blob([buffer], { type: 'video/mp4' }));
        result.innerHTML = `<video src="${url}" controls></video>`;
    } finally {
        // Always restore the button, even when composition throws —
        // the original left it stuck on "合成中..." after any error.
        submit.textContent = '开始合成';
        submit.disabled = false;
        // Best-effort release of the audio context's resources; a close()
        // failure must not mask an error from the try block.
        await audioContext.close().catch(() => {});
    }
});