
(intentionally bad) Volume Control Using Face Tracking


Quick and dirty test of making a really horrible volume control using face tracking.

If it’s too quiet, move your head closer!
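
The whole mechanism boils down to one mapping: tracking.js reports a bounding box for the detected face, and the area of that box (which grows as you move closer to the camera) is fed through a linear scale to set the gain. A minimal sketch of that mapping, assuming d3 v4 is loaded and `rect` comes from a tracking.js 'track' event; the name faceAreaToVolume and the `.clamp(true)` are illustrative assumptions, not part of the demo below:

// Map face bounding-box area (px^2) to a volume/gain value.
const faceAreaToVolume = d3.scaleLinear()
    .domain([10000, 40000])  // far from the camera .. right up against it
    .range([1, 100])
    .clamp(true);            // assumption: keep off-scale areas in range

// For each detected face rectangle:
// gainNode.gain.value = faceAreaToVolume(rect.width * rect.height);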

index.html

<!doctype html>
<html>
<head>
  <meta charset="utf-8">
  <title>tracking.js - volume control with camera</title>
  <script src="tracking-min.js"></script>
  <script src="face-min.js"></script>
  <script src="//d3js.org/d3.v4.min.js"></script>
  <style>
  video, canvas {
    margin-left: 230px;
    margin-top: 120px;
    position: absolute;
  }
  .volume-text {
    left: 250px;
    top: 100px;
    position: absolute;
  }
  </style>
</head>
<body>
  <div class="demo-title">
    <p><a href="//trackingjs.com" target="_parent">tracking.js</a> - control the volume with face tracking</p>
  </div>

  <div class="demo-frame">
    <div class="demo-container">
      <video id="video" width="320" height="240" preload autoplay loop muted></video>
      <canvas id="canvas" width="320" height="240"></canvas>
    </div>
  </div>
  <p class="volume-text"></p>

  <script>

    // Web Audio setup: all playback is routed through a single gain node.
    const audioCtx = new (window.AudioContext || window.webkitAudioContext)();
    const gainNode = audioCtx.createGain();
    gainNode.gain.value = 10;
    gainNode.connect(audioCtx.destination);

    // Map the detected face's bounding-box area (px^2) to a volume value.
    const volumeScale = d3.scaleLinear().domain([10000, 40000]).range([1, 100]);

    const volumeText = document.querySelector(".volume-text");

    importAudio("https://raw.githubusercontent.com/alexmacy/loops/master/back_on_the_streets_again.wav");

    function importAudio(url) {
      // Fetch the audio file as an ArrayBuffer and decode it for playback.
      var request = new XMLHttpRequest();
      request.open('GET', url, true);
      request.responseType = 'arraybuffer';
      request.onload = function() {
        audioCtx.decodeAudioData(request.response, function(buffer) {
          loadAudio(buffer);
        }, function() {
          alert("Error decoding audio data");
        });
      };
      request.send();

      function loadAudio(buffer) {
        // Connect the looping source through the gain node only, so the
        // face-driven gain actually controls what you hear.
        var source = audioCtx.createBufferSource();
        source.buffer = buffer;
        source.loop = true;
        source.connect(gainNode);
        source.start();
      }
    }
    
    window.onload = function() {
      const video = document.getElementById('video');
      const canvas = document.getElementById('canvas');
      const context = canvas.getContext('2d');

      let headSize = 0;

      // tracking.js face detector, fed by the webcam stream.
      const tracker = new tracking.ObjectTracker('face');
      tracker.setInitialScale(4);
      tracker.setStepSize(2);
      tracker.setEdgesDensity(0.1);

      tracking.track('#video', tracker, { camera: true });

      tracker.on('track', function(event) {
        context.clearRect(0, 0, canvas.width, canvas.height);

        event.data.forEach(function(rect) {
          // Bigger bounding box = closer face = louder playback.
          headSize = rect.width * rect.height;
          volumeText.innerHTML = "Volume: " + d3.format(".2f")(volumeScale(headSize));
          gainNode.gain.value = volumeScale(headSize);

          context.strokeStyle = '#a64ceb';
          context.strokeRect(rect.x, rect.y, rect.width, rect.height);
          context.font = '11px Helvetica';
          context.fillStyle = "#fff";
          context.fillText('x: ' + rect.x + 'px', rect.x + rect.width + 5, rect.y + 11);
          context.fillText('y: ' + rect.y + 'px', rect.x + rect.width + 5, rect.y + 22);
          context.fillText('area: ' + (rect.width * rect.height), rect.x + rect.width + 5, rect.y + 33);
        });
      });

    };
  </script>

</body>
</html>
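
One practical note if you run this in a current browser: autoplay policies may create the AudioContext in a suspended state until the page receives a user gesture, so the loop can stay silent even though decoding succeeded. A small sketch of a workaround (not part of the original demo) that resumes the context on the first click:

document.body.addEventListener('click', function () {
  // Autoplay policies may leave the context suspended until a user gesture.
  if (audioCtx.state === 'suspended') audioCtx.resume();
}, { once: true });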