DOCTOR VOX - Level Up [Royalty Free Music]

By: Argofox


Uploaded on 01/26/2015

Argofox: royalty free background music for YouTube videos and Twitch streams. Monetize songs with no copyright concerns!
Spotify Playlist: http://bit.ly/ArgofoxSP
YouTube Playlist: http://bit.ly/ArgofoxUL
SoundCloud Playlist: http://bit.ly/ArgofoxSC

Stream on Spotify: http://spoti.fi/1EMOT0x
Support on iTunes: http://bit.ly/1wxVky7
Download on SoundCloud: http://bit.ly/18kxhyw

DOCTOR VOX:
http://youtube.com/DOCTORVOXofficial
http://facebook.com/doctorvox
http://soundcloud.com/doctor-vox
http://twitter.com/theDOCTORVOX

Argofox:
https://soundcloud.com/argofox
https://facebook.com/argofox
https://youtube.com/argofox
https://twitter.com/argofox
https://discord.gg/argofox

If you use this track on YouTube, please link to this upload in your description. If you use this track on Twitch, please link to this channel in a panel using this image: http://bit.ly/ArgofoxTP

Comments (6):

By anonymous    2017-09-20

It would be nice if you'd post a gif or something to show what you're referring to.

Making something audio reactive is pretty simple though. Here's an open source site with lots of audio-reactive examples.

As for how to do it, you basically use the Web Audio API to stream the music and use its AnalyserNode to get the audio data out.

"use strict";

// make a Web Audio Context
var context = new AudioContext();
var analyser = context.createAnalyser();

// Make a buffer to receive the audio data
var numPoints = analyser.frequencyBinCount;
var audioDataArray = new Uint8Array(numPoints);

var ctx = document.querySelector("canvas").getContext("2d");

function render() {
  ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height);

  // get the current audio data
  analyser.getByteFrequencyData(audioDataArray);
  
  const width = ctx.canvas.width;
  const height = ctx.canvas.height;
  const size = 5;

  // draw a point every size pixels
  for (let x = 0; x < width; x += size) {
    // compute the audio data for this point
    const ndx = x * numPoints / width | 0;
    // get the audio data and make it go from 0 to 1
    const audioValue = audioDataArray[ndx] / 255;
    // draw a rect size by size big
    const y = audioValue * height;
    ctx.fillRect(x, y, size, size);
  }
  requestAnimationFrame(render);
}
requestAnimationFrame(render);

// Make an audio element
var audio = new Audio();
audio.loop = true;
audio.autoplay = true;

// this line is only needed if the music you are trying to play is on a
// different server than the page trying to play it.
// It asks the server for permission to use the music. If the server says "no"
// then you will not be able to play the music
audio.crossOrigin = "anonymous";

// call `handleCanplay` when the music can be played
audio.addEventListener('canplay', handleCanplay);
audio.src = "https://twgljs.org/examples/sounds/DOCTOR%20VOX%20-%20Level%20Up.mp3";
audio.load();


function handleCanplay() {
  // connect the audio element to the analyser node and the analyser node
  // to the main Web Audio context
  const source = context.createMediaElementSource(audio);
  source.connect(analyser);
  analyser.connect(context.destination);
}
/* CSS */
canvas { border: 1px solid black; display: block; }

<!-- HTML -->
<canvas></canvas>

Then it's just up to you to do something creative. For example, instead of drawing a bunch of black dots across the screen like the first example, we could scale randomly colored circles and adjust their color and velocity, something like this:

"use strict";

var context = new AudioContext();
var analyser = context.createAnalyser();

var numPoints = analyser.frequencyBinCount;
var audioDataArray = new Uint8Array(numPoints);

var ctx = document.querySelector("canvas").getContext("2d");
var ctx2 = document.createElement("canvas").getContext("2d");

var numSpots = 5;
var spots = [];
for (var ii = 0; ii < numSpots; ++ii) {
  spots.push({
    x: Math.random(), 
    y: Math.random(), 
    velocity: 0.01,
    direction: Math.random(),
    hue: Math.random() * 360 | 0,
  });
}

function rnd(min, max) {
  if (max === undefined) {
    max = min;
    min = 0;
  }
  return Math.random() * (max - min) + min;
}

function render() {
  ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height);
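  // feedback: redraw the previous frame (held in the other canvas)
  // slightly faded, zoomed, and rotated to leave swirling trails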
  ctx.save();
  ctx.globalAlpha = .97;
  ctx.globalCompositeOperation = "source-out";
  ctx.translate(ctx.canvas.width / 2, ctx.canvas.height / 2);
  ctx.scale(1.001, 1.001);
  ctx.rotate(0.003);
  ctx.translate(-ctx.canvas.width / 2, -ctx.canvas.height / 2);
  ctx.drawImage(ctx2.canvas, 0, 0, ctx.canvas.width, ctx.canvas.height);
  ctx.restore();

  analyser.getByteFrequencyData(audioDataArray);
  
  const width = ctx.canvas.width;
  const height = ctx.canvas.height;

  spots.forEach((spot, n) => {
    const ndx = n * numPoints / numSpots | 0;
    const audioValue = audioDataArray[ndx] / 255;
    const sat = Math.pow(audioValue, 2) * 100;
    
    spot.velocity = audioValue * 0.02;
    spot.direction = (spot.direction + 1 + rnd(-.01, 0.01)) % 1;
    const angle = spot.direction * Math.PI * 2;
    spot.x = (spot.x + Math.cos(angle) * spot.velocity + 1) % 1; 
    spot.y = (spot.y + Math.sin(angle) * spot.velocity + 1) % 1;
    
    ctx.fillStyle = "hsl(" + spot.hue + "," + sat + "%,50%)";
    ctx.beginPath();
    ctx.arc(spot.x * width, spot.y * height, 50 * audioValue, 0, Math.PI * 2, false);
    ctx.fill();
  });
  
  // ping-pong: swap the two contexts so this frame's image becomes
  // the trail source for the next frame
  var temp = ctx;
  ctx = ctx2;
  ctx2 = temp;
  
  requestAnimationFrame(render);
}
requestAnimationFrame(render);


var audio = new Audio();
audio.loop = true;
audio.autoplay = true;
// this line is only needed if the music you are trying to play is on a
// different server than the page trying to play it.
// It asks the server for permission to use the music. If the server says "no"
// then you will not be able to play the music
audio.crossOrigin = "anonymous";

audio.addEventListener('canplay', handleCanplay);
audio.src = "https://twgljs.org/examples/sounds/DOCTOR%20VOX%20-%20Level%20Up.mp3";
audio.load();


function handleCanplay() {
  const source = context.createMediaElementSource(audio);
  source.connect(analyser);
  analyser.connect(context.destination);
}
/* CSS */
canvas { border: 1px solid black; display: block; }

<!-- HTML -->
<canvas></canvas>


music: DOCTOR VOX - Level Up

Original Thread

By anonymous    2017-09-20

Making something audio reactive is pretty simple. Here's an open source site with lots of audio-reactive examples.

As for how to do it, you basically use the Web Audio API to stream the music and use its AnalyserNode to get the audio data out.

"use strict";

// make a Web Audio Context
var context = new AudioContext();
var analyser = context.createAnalyser();

// Make a buffer to receive the audio data
var numPoints = analyser.frequencyBinCount;
var audioDataArray = new Uint8Array(numPoints);

var ctx = document.querySelector("canvas").getContext("2d");

function render() {
  ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height);

  // get the current audio data
  analyser.getByteFrequencyData(audioDataArray);
  
  const width = ctx.canvas.width;
  const height = ctx.canvas.height;
  const size = 5;

  // draw a point every size pixels
  for (let x = 0; x < width; x += size) {
    // compute the audio data for this point
    const ndx = x * numPoints / width | 0;
    // get the audio data and make it go from 0 to 1
    const audioValue = audioDataArray[ndx] / 255;
    // draw a rect size by size big
    const y = audioValue * height;
    ctx.fillRect(x, y, size, size);
  }
  requestAnimationFrame(render);
}
requestAnimationFrame(render);

// Make an audio element
var audio = new Audio();
audio.loop = true;
audio.autoplay = true;

// this line is only needed if the music you are trying to play is on a
// different server than the page trying to play it.
// It asks the server for permission to use the music. If the server says "no"
// then you will not be able to play the music
// Note if you are using music from the same domain 
// **YOU MUST REMOVE THIS LINE** or your server must give permission.
audio.crossOrigin = "anonymous";

// call `handleCanplay` when the music can be played
audio.addEventListener('canplay', handleCanplay);
audio.src = "https://twgljs.org/examples/sounds/DOCTOR%20VOX%20-%20Level%20Up.mp3";
audio.load();


function handleCanplay() {
  // connect the audio element to the analyser node and the analyser node
  // to the main Web Audio context
  const source = context.createMediaElementSource(audio);
  source.connect(analyser);
  analyser.connect(context.destination);
}
/* CSS */
canvas { border: 1px solid black; display: block; }

<!-- HTML -->
<canvas></canvas>

Then it's just up to you to draw something creative.

Note some troubles you'll likely run into:

  1. At this point in time (2017/1/3) neither Android Chrome nor iOS Safari supports analysing streaming audio data. Instead you have to load the entire song (a minimal sketch of that approach follows these notes). Here's a library that tries to abstract that a little.

  2. As pointed out in the sample you can only analyse audio if the source is either from the same domain OR you ask for CORS permission and the server gives permission. AFAIK only Soundcloud gives permission, and it's on a per-song basis: it's up to each individual song's settings whether or not audio analysis is allowed.

    To try to explain this part:

    The default is you have permission to access all data from the same domain but no permission from other domains.

    When you add

    audio.crossOrigin = "anonymous";
    

    That basically says "ask the server for permission for user 'anonymous'". The server can give permission or not; it's up to the server. This includes the server on the same domain, which means if you're going to request a song on the same domain you need to either (a) remove the line above or (b) configure your server to give CORS permission. Most servers by default do not give CORS permission, so if you add that line and the server, even on the same domain, does not give CORS permission, then trying to analyse the audio will fail (see the origin-check sketch after these notes).
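
Not from the original answer, but here's a minimal sketch of the load-the-whole-song approach from note 1, using the standard fetch and decodeAudioData APIs and the same song URL as the snippets above:

"use strict";

const context = new AudioContext();
const analyser = context.createAnalyser();
analyser.connect(context.destination);

// download and decode the entire song up front instead of streaming it;
// the server still has to grant CORS permission for the fetch to succeed
fetch("https://twgljs.org/examples/sounds/DOCTOR%20VOX%20-%20Level%20Up.mp3")
  .then(response => response.arrayBuffer())
  .then(arrayBuffer => context.decodeAudioData(arrayBuffer))
  .then(audioBuffer => {
    // play the decoded buffer through the analyser
    const source = context.createBufferSource();
    source.buffer = audioBuffer;
    source.loop = true;
    source.connect(analyser);
    source.start();
  });

// analyser.getByteFrequencyData(...) then works exactly as in the
// render loops above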
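
For note 2, a small sketch (the file path is hypothetical) that only asks for CORS permission when the song actually lives on a different origin:

const url = "music/song.mp3";  // hypothetical path; replace with your own

const audio = new Audio();
// only request CORS permission for files on a different origin; setting
// crossOrigin for a same-domain file makes analysis fail unless the
// server explicitly grants CORS permission
if (new URL(url, location.href).origin !== location.origin) {
  audio.crossOrigin = "anonymous";
}
audio.src = url;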


music: DOCTOR VOX - Level Up

Original Thread

By anonymous    2017-09-20

I don't know if this is really an answer to your question or not, but you should probably be using a texture for this. Using a texture has multiple advantages:

  • You can render the entire screen with just a single quad.

    Rendering is destination based, meaning it does the minimal amount of work: one unit of work per destination pixel, whereas with lines/points you're likely doing far, far more work per destination pixel. This means you shouldn't have to worry about performance.

  • Textures are random access, meaning you can use the data in more ways than you can with buffers/attributes.

  • Textures are sampled, so the case where freqData.length !== w is handled better.

  • Because textures are random access you could pass idx into the shader and use it to manipulate the texture coordinates so that the top or bottom line is always the newest data and the rest scrolls (a rough sketch of this follows the sample below). This would be harder with attributes/buffers.

  • Textures can be written to from the GPU by attaching them to a framebuffer. This would also let you scroll: use 2 textures; each frame, copy h - 1 lines from tex1 to tex2, shifted up or down one line, then copy freqData to the first or last line. The next frame, do the same but use tex2 for the source and tex1 for the destination.

    This would also let you scroll the data. It's arguably slightly slower than passing idx into the shader and manipulating the texture coordinates, but it makes the texture coordinate usage consistent, so if you want to do any fancier visualization you don't have to take idx into account everywhere you sample the texture.

    vertexshaderart.com uses this technique so shaders don't have to take into account some value like idx to figure out where the newest data is in the texture. The newest data is always at texture coordinate v = 0.

Here's a sample. It does neither of the last 2 things, just uses a texture instead of a buffer.

const audio = document.querySelector('audio');
const canvas = document.querySelector('canvas');

const audioCtx = new AudioContext();
const source = audioCtx.createMediaElementSource(audio);
const analyser = audioCtx.createAnalyser();
const freqData = new Uint8Array(analyser.frequencyBinCount);

source.connect(analyser);
analyser.connect(audioCtx.destination);

const gl = canvas.getContext('webgl');

const frag = gl.createShader(gl.FRAGMENT_SHADER);
gl.shaderSource(frag, `
  precision mediump float;
  varying vec2 v_texcoord;

  uniform sampler2D tex;
  
  float P = 5.5;

  void main() {
    // these 2 lines convert from 0.0 -> 1.0 to -1. to +1
    // assuming that signed bytes were put in the texture.
    // This is what the previous buffer based code was doing
    // by using BYTE for its vertexAttribPointer type.
    // The thing is AFAICT the audio data from getByteFrequencyData
    // is unsigned data. See
    // https://webaudio.github.io/web-audio-api/#widl-AnalyserNode-getByteFrequencyData-void-Uint8Array-array
    // But, this is what the old code was doing
    // so I thought I should repeat it here.

    float value = texture2D(tex, v_texcoord).r * 2.;
    value = mix(value, -2. + value, step(1., value));

    float r = 1.0 + sin(value * P);
    float g = 1.0 - sin(value * P);
    float b = 1.0 + cos(value * P);

    gl_FragColor = vec4(r, g, b, 1);
  }
`);
gl.compileShader(frag);

const vert = gl.createShader(gl.VERTEX_SHADER);
gl.shaderSource(vert, `
  attribute vec2 a_position;
  varying vec2 v_texcoord;

  void main() {
    gl_Position = vec4(a_position, 0, 1);
    
    // we can do this because we know a_position is a unit quad
    v_texcoord = a_position * .5 + .5;  
  }
`);
gl.compileShader(vert);

const program = gl.createProgram();
gl.attachShader(program, vert);
gl.attachShader(program, frag);
gl.linkProgram(program);

const a_position = gl.getAttribLocation(program, 'a_position');
gl.useProgram(program);

const w = freqData.length;
let h = 0;

const pos_buffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, pos_buffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
  -1, -1,
   1, -1,
  -1,  1,
  -1,  1,
   1, -1,
   1,  1,
]), gl.STATIC_DRAW);
gl.vertexAttribPointer(a_position, 2, gl.FLOAT, false, 0, 0);
gl.enableVertexAttribArray(a_position);

const texture = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);

let idx = 0;
function render() {
  resizeCanvasToDisplaySize(gl.canvas);
  
  gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
  
  if (gl.canvas.height !== h) {
     // reallocate the texture. Note: more work would be needed
     // to save the old data; as is, if the user resizes, the
     // data will be cleared
     h = gl.canvas.height;
     gl.bindTexture(gl.TEXTURE_2D, texture);
     gl.texImage2D(gl.TEXTURE_2D, 0, gl.LUMINANCE, w, h, 0, 
                   gl.LUMINANCE, gl.UNSIGNED_BYTE, null);
     idx = 0;
  }

  analyser.getByteFrequencyData(freqData);
  
  gl.bindTexture(gl.TEXTURE_2D, texture);
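  // copy the newest frequency data into row `idx` of the texture;
  // idx walks down the texture one row per frame, wrapping at h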
  gl.texSubImage2D(gl.TEXTURE_2D, 0, 0, idx, w, 1, 
                   gl.LUMINANCE, gl.UNSIGNED_BYTE, freqData);

  gl.drawArrays(gl.TRIANGLES, 0, 6);

  idx = (idx + 1) % h;

  requestAnimationFrame(render);
}
requestAnimationFrame(render);

function resizeCanvasToDisplaySize(canvas) {
  const w = canvas.clientWidth;
  const h = canvas.clientHeight;
  if (canvas.width !== w || canvas.height !== h) {
    canvas.width = w;
    canvas.height = h;
  }
}
/* CSS */
body { margin: 0; font-family: monospace; }
canvas { 
  position: absolute;
  left: 0;
  top: 0;
  width: 100vw; 
  height: 100vh; 
  display: block; 
  z-index: -1;
}
<!-- HTML -->
<audio src="https://twgljs.org/examples/sounds/DOCTOR VOX - Level Up.mp3" controls="" crossOrigin="" autoplay></audio>
<div>music: <a href="http://youtu.be/eUX39M_0MJ8">DOCTOR VOX - Level Up</a></div>
<canvas></canvas>
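
Not part of the answer's sample, but here's a rough sketch of the pass-idx-into-the-shader variant mentioned in the list above. u_offset is an assumed uniform name: offsetting v in the fragment shader makes texture coordinate v = 0 always sample the newest row:

// fragment shader sketch: shift v by the newest row's position so that
// v = 0 always reads the latest frequency data
const fragSrc = `
  precision mediump float;
  varying vec2 v_texcoord;
  uniform sampler2D tex;
  uniform float u_offset;  // assumed uniform: (idx + 0.5) / h

  void main() {
    float v = fract(v_texcoord.y + u_offset);
    float value = texture2D(tex, vec2(v_texcoord.x, v)).r;
    gl_FragColor = vec4(vec3(value), 1);
  }
`;

// in the render loop, after writing row `idx` with texSubImage2D
// (look the location up once at init in real code):
//   gl.uniform1f(gl.getUniformLocation(program, 'u_offset'), (idx + 0.5) / h);

This keeps the newest data at v = 0, matching the vertexshaderart.com convention described above.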

Original Thread
