The user uploads images of a suitable size, chooses a rendering animation effect and background music, previews a slideshow-like result, and finally clicks Confirm to generate a video that can be played on Toutiao or Douyin.
Possible approaches to generating the video
Pure front-end video encoding (e.g. a WebM encoder such as Whammy)
- Image URLs can only be relative addresses
- The music track cannot be included in the result
- The generated video has to be downloaded and then uploaded again
Send each frame image to the back end and have the back end call FFmpeg to encode the video
- When there are many screenshots, the base64-encoded images become too large for the front end to send to the back end conveniently
- Capturing frames on the front end also depends on the performance of the user's machine
The workflow finally chosen
- The canvas animation and frame capture run on the server side; the back end fetches the screenshots based on a marker
- FFmpeg merges the frames into a video, the video is stored on the server, and a corresponding download URL is returned (a sample FFmpeg invocation is sketched after this list)
- The front end obtains the video file through a request
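A minimal sketch of how the back end could invoke FFmpeg on the saved frame sequence. The paths, frame rate, and music file name are assumptions for illustration, not the project's actual values; it only assumes ffmpeg is installed on the server and that frames were written as 1.jpg, 2.jpg, ... by the capture step described later.
const { execFile } = require('child_process');
execFile('ffmpeg', [
'-framerate', '30', // frame rate of the screenshot sequence (assumed)
'-start_number', '1', // frames are numbered from 1
'-i', './images/%d.jpg', // frame file pattern produced by the capture step
'-i', './music.mp3', // background music chosen by the user (assumed path)
'-c:v', 'libx264', // H.264 so the result plays on mobile players
'-pix_fmt', 'yuv420p', // widest player compatibility
'-shortest', // stop at the end of the shorter input
'./output.mp4'
], (err, stdout, stderr) => {
if (err) return console.error(stderr);
console.log('video generated: ./output.mp4');
});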
How the front end captures canvas frames
Generating each frame image
Each frame can be generated with the canvas's native toDataURL API, which returns the image data as a base64 string:
function generatePng() {
var canvas = document.createElement('canvas');
let icavas = '#canvas' // id of the canvas that renders the animation
if (wrapWidth == 2) {
icavas = '#verticalCanvas'
}
var canvasNode = document.querySelector(icavas)
canvas.width = canvasNode.width;
canvas.height = canvasNode.height;
var ctx = canvas.getContext('2d');
ctx.drawImage(canvasNode, 0, 0);
var imgData = canvas.toDataURL("image/png");
return imgData;
}
How to capture frames of the canvas animation
Use setInterval to run the frame-generation function at a fixed interval; requestAnimationFrame also works (a variant is sketched after the snippet below):
setInterval(function() {
imgsTemp.push(generatePng())
}, 1000/60)
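For reference, a minimal sketch of the requestAnimationFrame variant, reusing generatePng() and imgsTemp from above; animationDone is a hypothetical flag that the animation's completion callback would set, it is not part of the original code.
function captureFrame() {
imgsTemp.push(generatePng())
if (!window.animationDone) { // hypothetical flag set when the animation finishes
requestAnimationFrame(captureFrame)
}
}
requestAnimationFrame(captureFrame)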
How the back end obtains each frame
Option 1: run the front-end canvas animation JS in a headless browser and take the screenshots in JS
Initial idea:
Print the screenshots with console.log. The canvas screenshots are base64 strings, and a 15-second animation produces more than 100 of them, which crashed the server outright (rejected).
Trial-run approach:
Store the screenshots in a JS variable; once the animation has finished playing, add a marker to the page, and then have the back end read the variable (a headless-browser sketch for that step follows the code). The code is as follows:
const pages = {
imageZoomOut: import('./image_zoom_inout.js'), // zoom
imageArt: import('./image_art.js'), // wipe
imageGrid: import('./image_grid.js'), // grid
imageRotate: import('./image_rotate.js'), // open/close
imageFlash: import('./image_flash.js'), // image & text flash
imageVerticalArt: import('./image_vertical_art.js'), // vertical wipe
imageVerticalGrid: import('./image_vertical_grid.js'), // vertical grid
imageVerticalRotate: import('./image_vertical_rotate.js'), // vertical open/close
imageVerticalFlash: import('./image_vertical_flash.js'), // vertical image & text flash
imageVerticalZoomOut: import('./image_vertical_zoom_inout.js'), // vertical zoom
imageVertical: import('./image_vertical.js'), // vertical generic
};
var isShow = false
var imgsBase64 = []
var imgsTemp = []
var cutInter = null
var imgsTimeLong = 0
function getQuerys(tag) {
let queryStr = window.location.search.slice(1);
let queryArr = queryStr.split('&');
let query = [];
let spec = {}
for (let i = 0, len = queryArr.length; i < len; i++) {
let queryItem = queryArr[i].split('=');
let qitem = decodeURIComponent(queryItem[1])
if (queryItem[0] == tag) {
query.push(qitem);
} else {
spec[queryItem[0]] = qitem
}
}
return { list: query, spec: spec };
}
var getQuery = getQuerys('images')
var effectTag = getQuery.spec.tid
var wrapWidth = getQuery.spec.templateType
let num = 0
let imgArr = []
function creatImg() {
var images = getQuery.list
let newImg = []
let vh = wrapWidth == 1 ? 360 : 640
let vw = wrapWidth == 1 ? 640 : 360
if (effectTag.indexOf('Flash') > -1) {
images.map(function(item, index) {
if (11 === index || 13 === index || 16 === index) {
var temp = new Image(vw, vh)
temp.setAttribute('crossOrigin', 'anonymous');
temp.src = item;
newImg.push(temp)
} else {
newImg.push(item)
}
})
imgArr = newImg
renderAnimate(effectTag)
} else {
images.map(function(item) {
var temp = new Image(vw, vh)
temp.setAttribute('crossOrigin', 'anonymous');
temp.src = item;
temp.onload = function() {
num++
if (num == images.length) {
renderAnimate(effectTag)
}
}
newImg.push(temp)
})
imgArr = newImg
}
}
async function renderAnimate(page) {
//await creatImg()
let me = this
const pageA = await pages[page];
let oldDate = new Date().getTime()
let icavas = '#canvas'
if (wrapWidth == 2) {
icavas = '#verticalCanvas'
}
let innerCanvas = document.querySelector(icavas)
isShow = false
pageA[page].render(null, {
canvas: innerCanvas,
images: imgArr
}, function() {
// animation has finished playing
isShow = true;
imgsTemp.push(generatePng())
imgsBase64.push(imgsTemp)
let now = new Date().getTime()
window.imgsTimeLong = now - oldDate
clearInterval(cutInter)
document.getElementById('cutImg').innerHTML = 'done' // page marker
})
cutInter = setInterval(function() {
imgsTemp.push(generatePng())
if (imgsTemp.length >= 50) {
imgsBase64.push(imgsTemp)
imgsTemp = []
}
}, 130)
}
function getImgs() {
return imgsBase64
}
function generatePng() {
var canvas = document.createElement('canvas');
let icavas = '#canvas'
if (wrapWidth == 2) {
icavas = '#verticalCanvas'
}
var canvasNode = document.querySelector(icavas)
canvas.width = canvasNode.width;
canvas.height = canvasNode.height;
var ctx = canvas.getContext('2d');
ctx.drawImage(canvasNode, 0, 0);
var imgData = canvas.toDataURL("image/png");
return imgData;
}
window.imgsBase64 = imgsBase64 // variable holding the screenshots
creatImg()
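On the server, a headless browser such as Puppeteer could then load this page, wait for the #cutImg marker to read 'done', and pull window.imgsBase64 out of the page. A minimal sketch under those assumptions (the page URL is hypothetical, and the elided query values are left as-is):
const puppeteer = require('puppeteer');
(async () => {
const browser = await puppeteer.launch();
const page = await browser.newPage();
// hypothetical URL serving the page above, with the same query parameters
await page.goto('http://localhost:8080/render.html?tid=imageArt&templateType=1&images=...');
// wait until the animation's completion callback writes 'done' into #cutImg
await page.waitForFunction("document.getElementById('cutImg').innerHTML === 'done'", { timeout: 60000 });
const frames = await page.evaluate(() => window.imgsBase64); // arrays of base64 PNGs
await browser.close();
// frames can now be decoded and handed to FFmpeg
})();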
Drawbacks of the trial-run approach:
- Capturing one screenshot every 130 ms yields too few frames (roughly 7–8 fps), so the resulting video is not smooth;
- Tightening the interval to 60 frames per second slows the animation playback, which lengthens the video-generation time (a consequence of how setTimeout/setInterval scheduling works);
- With image sizes of 640x360 or 360x640, the generated video is not sharp when previewed on a phone;
- After the requirement changed to 1280x720 or 720x1280 images, an animation that originally ran for 15 seconds took more than 70 seconds on the server;
- Canvas screenshots run into cross-origin restrictions, which can be handled as follows:
var temp = new Image(vw, vh)
temp.setAttribute('crossOrigin', 'anonymous');
Final approach: run the animation in Node
Use node-canvas and write each frame screenshot into a designated folder with fs.writeFile:
const {
createCanvas,
loadImage
} = require("canvas");
const pages = {
imageZoomOut: require('./image_zoom_inout.js'), // zoom
imageArt: require('./image_art.js'), // wipe
imageGrid: require('./image_grid.js'), // grid
imageRotate: require('./image_rotate.js'), // open/close
imageFlash: require('./image_flash.js'), // image & text flash
imageVerticalArt: require('./image_vertical_art.js'), // vertical wipe
imageVerticalGrid: require('./image_vertical_grid.js'), // vertical grid
imageVerticalRotate: require('./image_vertical_rotate.js'), // vertical open/close
imageVerticalFlash: require('./image_vertical_flash.js'), // vertical image & text flash
imageVerticalZoomOut: require('./image_vertical_zoom_inout.js'), // vertical zoom
imageVertical: require('./image_vertical.js'), // vertical generic
};
const fs = require("fs");
const querystring = require('querystring');
let args = process.argv && process.argv[2]
let parse = querystring.parse(args)
let vh = parse.templateType == 1 ? 720 : 1280 // canvas height
let vw = parse.templateType == 1 ? 1280 : 720 // canvas width
let imgSrcArray = parse.images // image array
let effectTag = parse.tid // animation effect
let saveImgPath = process.argv && process.argv[3]
let loadArr = []
imgSrcArray.forEach(element => {
if (/\.(jpg|jpeg|png|JPG|PNG)$/.test(element)) {
loadArr.push(loadImage(element))
} else {
loadArr.push(element)
}
});
const canvas = createCanvas(vw, vh);
const ctx = canvas.getContext("2d");
Promise.all(loadArr)
.then((images) => {
// initialize the animation
console.log('animation started')
let oldDate = new Date().getTime()
pages[effectTag].render(null, {
canvas: canvas,
images: images
}, function() {
clearInterval(interval)
let now = new Date().getTime()
console.log(now - oldDate, 'animation finished')
})
const interval = setInterval(
(function() {
let x = 0;
return () => {
x += 1;
ctx.canvas.toDataURL('image/jpeg', function(err, png) {
if (err) {
console.log(err);
return;
}
let data = png.replace(/^data:image\/\w+;base64,/, '');
let buf = Buffer.from(data, 'base64');
fs.writeFile(`${saveImgPath}${x}.jpg`, buf, {}, (err) => {
console.log(x, err);
return;
});
});
};
})(),
1000 / 60
);
})
.catch(e => {
console.log(e);
});
Run the following command in a terminal (e.g. iTerm):
node testCanvas.js 'tid=imageArt&templateType=1&images=../assets/imgs/8.png&images=../assets/imgs/6.png&images=../assets/imgs/7.png&images=../assets/imgs/6.png&images=../assets/imgs/8.png&images=../assets/imgs/7.png&images=../assets/imgs/4.png&images=../assets/imgs/6.png&images=../assets/imgs/8.png&images=../assets/imgs/7.png' './images/'
Parameter description:
1) tid is the name of the animation
2) templateType is the size: "1" means 1280*720, "2" means 720*1280
3) images are the image paths
4) './images/' is the folder where the screenshots are saved
Drawbacks of running in the Node environment
- The image paths passed as arguments can only be relative paths
- When the animation is too complex the run time grows: once the number of shapes on the page reaches a certain level, every frame makes a large number of canvas API calls and performs a lot of computation, and with large images on top of that it becomes slow. For example:
Every 13 seconds the drawing loop below is executed once:
for (var A = 0; 50 > A; A++)
p.beginPath(),
p.globalAlpha = 1 - A / 49,
p.save(),
p.arc(180,320,P + 2 * A, 0, 2 * Math.PI),
p.clip(),
p.drawImage(x[c], 0, 0, y.width, y.height),
p.restore(),
p.closePath();
for (var S = 0; 50 > S; S++)
p.beginPath(),
p.globalAlpha = 1 - S / 49,
p.save(),
p.rect(0, 0, d + P + 2 * S, g + b + 2 * S),
p.clip(),
p.drawImage(x[c], 0, 0, y.width, y.height),
p.restore(),
p.closePath();
Because of Node.js's event-loop model, using Node.js requires keeping the event loop turning at all times. If a very time-consuming function is executed, the event loop gets stuck inside it and cannot process other tasks in time, which is why some animations are still slow.
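A tiny illustration (not project code) of the effect: a long synchronous render delays every timer waiting on the same event loop, so the capture interval can never actually fire at 60 fps.
setInterval(function() {
console.log('tick', Date.now()) // wants to fire every 16 ms
}, 16)
setInterval(function heavyRenderFrame() {
var end = Date.now() + 200 // simulate 200 ms of synchronous canvas drawing
while (Date.now() < end) {} // blocks the event loop; ticks now arrive ~200 ms apart
}, 16)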
Possible later optimizations
Try using Go to capture the frames;
Rewrite the canvas animations;
Extras
Video bitrate
Video bitrate is the number of bits of data transferred per unit of time, usually measured in kbps (kilobits per second). Loosely speaking it is like a sampling rate: the more data sampled per unit of time, the higher the precision and the closer the processed file is to the original. For audio, for example, the higher the bitrate, the less the file is compressed, the smaller the quality loss, and the closer it sounds to the source.
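For example, a 15-second clip encoded at 2,000 kbps carries roughly 2,000 × 15 ÷ 8 ≈ 3,750 KB (about 3.7 MB) of video data, ignoring container overhead.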
FPS (Frames Per Second)
FPS is a term from the imaging field that refers to the number of frames shown per second, i.e. the number of pictures an animation or video displays each second. It measures the amount of information used to store and display motion video. The more frames per second, the smoother the motion appears; 30 fps is generally the minimum needed to avoid choppy motion. Film, for example, plays at 24 frames per second, meaning 24 still pictures are projected onto the screen each second.
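For reference, a 15-second animation captured at 60 fps needs 15 × 60 = 900 frames, while the 130 ms interval used in the trial run above yields only about 115 frames (roughly 7.7 fps).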