1. Preface
This post walks through streaming a WAV file from a Spring MVC controller and drawing its live waveform in the browser with the Web Audio API and a canvas.
2. Backend Code
@RequestMapping(value = "/audioListen")
@ResponseBody
public void getAudio(HttpServletResponse response) throws Exception {
    // Hardcoded sample file; in practice the path would come from a request parameter.
    String path = "E:\\111.wav";
    File file = new File(path);
    if (!file.exists()) {
        throw new RuntimeException("Audio file does not exist --> 404");
    }
    long length = file.length();
    response.addHeader("Accept-Ranges", "bytes");
    response.addHeader("Content-Length", String.valueOf(length));
    // Content-Range is "bytes first-last/total", where "last" is the index of the
    // final byte served, hence length - 1. A real implementation would parse the
    // request's Range header instead of always streaming from byte 0.
    response.addHeader("Content-Range", "bytes 0-" + (length - 1) + "/" + length);
    // The sample file is a WAV, so audio/wav is the accurate content type.
    response.addHeader("Content-Type", "audio/wav");
    // try-with-resources closes both streams even if the transfer fails midway.
    try (OutputStream os = response.getOutputStream();
         FileInputStream fis = new FileInputStream(file)) {
        byte[] b = new byte[1024];
        int len;
        while ((len = fis.read(b)) != -1) {
            os.write(b, 0, len);
        }
    }
}
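To sanity-check the endpoint before wiring up the page, it can be called straight from the browser console. A minimal sketch, assuming the application is served under the same /tcc/front prefix the frontend below uses (as written, the handler always streams from byte 0, so the Range header is only advisory here):

fetch('/tcc/front/audioListen', {
    method: 'POST', // the page below also requests it via POST
    headers: { 'Range': 'bytes=0-' } // what a range-aware handler would honor
}).then(function (res) {
    return res.arrayBuffer();
}).then(function (buf) {
    console.log('received ' + buf.byteLength + ' bytes');
});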
3. Frontend Code
<!DOCTYPE html>
<html lang="zh" xmlns:th="http://www.thymeleaf.org">
<head>
<title>Audio Waveform</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
<meta http-equiv="Pragma" content="no-cache"/>
<meta http-equiv="Cache-Control" content="no-cache, must-revalidate"/>
<meta http-equiv="Expires" content="0"/>
<style type="text/css">
.middle-bottom-left {
width: 33%;
}
.divBox {
background-color: #0d2b55;
background-repeat: repeat;
background-size: 100% 100%;
width: 50%;
height: 50%;
}
.coalA1_top {
width: 100%;
height: 13%;
padding-top: 2%;
padding-left: 13%;
font-size: 4rem;
color: #63cdff;
}
.coalA1_middle {
width: 100%;
height: 75%;
}
.coalA1_bottom {
width: 100%;
height: 10%;
}
#coalA1_audio_canvas {
width: 80%;
margin-left: 10%;
margin-right: 10%;
height: 90%;
margin-top: 5%;
margin-bottom: 5%;
}
</style>
</head>
<body>
<div class="container">
<div class="middle-bottom-left divBox">
<div class="coalA1_top">音频波线</div>
<div class="coalA1_middle">
<canvas id="coalA1_audio_canvas"></canvas>
</div>
<div class="coalA1_bottom"></div>
</div>
</div>
<script type="text/javascript" th:src="@{/js/jquery.min.js}"></script>
<script type="text/javascript" th:src="@{/front/realtime/js/echarts_gauge.js}"></script>
<script type="text/javascript">
$(document).ready(function () {
$(".container").width($(window).width());
$(".container").height($(window).height());
play_A1_audio();
});
function play_A1_audio() {
var canvas = document.getElementById("coalA1_audio_canvas");
var canvasCtx = canvas.getContext("2d");
var drawVisual; // handle of the current requestAnimationFrame loop
// First instantiate an AudioContext. Unfortunately browser support is inconsistent,
// so a vendor-prefixed fallback is needed. The AudioContext is the audio-processing
// interface: it works by connecting the nodes it creates (AudioNode) to one another,
// and the audio data flows through those nodes and is processed along the way.
// In one sentence: AudioContext is an audio object, just as new Date() is a date object.
var AudioContext = window.AudioContext || window.webkitAudioContext || window.mozAudioContext;
if (!AudioContext) {
    alert("Your browser does not support the Web Audio API. Please try another browser (Chrome or Firefox); Chrome is strongly recommended!");
    return;
}
var audioContext = new AudioContext(); // instantiate
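// Note: most browsers start an AudioContext in the "suspended" state until the
// user has interacted with the page (autoplay policy); the mouseenter handler
// registered further down resumes it for exactly that reason.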
var filter = audioContext.createBiquadFilter();
// The steps from here:
// 1. Fetch the audio file (currently a single file at a time)
// 2. Read it, obtaining the audio as binary data
// 3. Decode the binary data
// Request the audio over the network and decode it
function getData() {
var request = new XMLHttpRequest(); // open a request
request.open('POST', "/tcc/front/audioListen", true); // request the data from this URL
request.responseType = 'arraybuffer'; // set the expected response type
request.onload = function () {
var audioData = request.response;
if (new Date().getSeconds() % 10 === 0) { // throttle logging to every tenth second
console.log("play_A1_audio", audioData);
}
// Once the data has finished buffering, decode it
audioContext.decodeAudioData(audioData, function (buffer) {
//source.buffer = buffer; // put the decoded data into the source
// Process the data:
// create an AudioBufferSourceNode, the node that plays the decoded buffer
var audioBufferSourceNode = audioContext.createBufferSource();
// Create an AnalyserNode, the node used to analyze the audio spectrum
var analyser = audioContext.createAnalyser();
// fftSize sets the window size of the Fast Fourier Transform (2048 is the usual
// default). In this visualization it determines how many sample points are drawn
// per frame, and therefore how fine-grained the waveform trace looks.
analyser.fftSize = 256;
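// Per the Web Audio spec, frequencyBinCount is always fftSize / 2, so
// fftSize = 256 yields the 128 data points drawn per frame below.
// Uncomment to verify:
// console.log(analyser.fftSize, analyser.frequencyBinCount); // 256, 128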
// Connect the nodes. audioContext.destination is the final output target of
// the audio; think of it as the sound card. The last node in the chain must
// therefore connect to audioContext.destination for anything to be heard.
audioBufferSourceNode.connect(analyser);
analyser.connect(audioContext.destination);
//console.log(audioContext.destination)
// Play the audio
audioBufferSourceNode.buffer = buffer; // the decoded buffer passed into this callback
audioBufferSourceNode.start(); // some older browsers use noteOn() instead, with the same usage
document.documentElement.addEventListener('mouseenter', function () {
    if (audioContext.state !== 'running') {
        audioContext.resume(); // see the autoplay note above
    }
}, { once: true });
// Filter settings
filter.type = 'highpass';
filter.frequency.value = 600;
filter.Q.value = 800;
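// Note: as written, this filter is configured but never connected into the
// node graph, so it does not affect what you hear. If filtering were intended,
// it would sit between the source and the analyser, e.g.:
//   audioBufferSourceNode.connect(filter);
//   filter.connect(analyser);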
// Visualization: create the data array
var bufferLength = analyser.frequencyBinCount;
// console.log(bufferLength);
var dataArray = new Uint8Array(bufferLength);
//console.log("dataArray",dataArray)
canvasCtx.clearRect(0, 0, canvas.width, canvas.height);
function draw() {
drawVisual = requestAnimationFrame(draw);
analyser.getByteTimeDomainData(dataArray);
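// getByteTimeDomainData fills dataArray with unsigned bytes in 0..255,
// where 128 corresponds to the waveform's zero line; that is why each
// sample is divided by 128 below.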
canvasCtx.fillStyle = '#081736';
canvasCtx.fillRect(0, 0, canvas.width, canvas.height);
canvasCtx.lineWidth = 2;
canvasCtx.strokeStyle = '#ffffff';
canvasCtx.beginPath();
var sliceWidth = canvas.width * 1.0 / bufferLength;
var x = 0;
for (var i = 0; i < bufferLength; i++) {
var v = dataArray[i] / 128.0;
var y = v * 200 / 5; // controls the vertical position of the waveform in the chart
if (i === 0) {
canvasCtx.moveTo(x, y);
} else {
canvasCtx.lineTo(x, y);
}
x += sliceWidth;
}
canvasCtx.lineTo(canvas.width, canvas.height / 2);
canvasCtx.stroke();
}
cancelAnimationFrame(drawVisual); // stop the previous poll's draw loop before starting a new one
draw();
}, function (err) {
// alert('Failed to decode the file!'); // decode-error handling
console.log("Failed to decode the file!", err);
});
};
request.send();
}
getData();
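// Poll once per second. Each successful decode starts a fresh
// AudioBufferSourceNode, so every poll begins another playback of the clip.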
setInterval(getData, 1000);
}
</script>
</body>
</html>