processing:video_capture
Differences
This shows you the differences between two versions of the page.
Both sides previous revisionPrevious revisionNext revision | Previous revision | ||
processing:video_capture [2013/04/12 18:48] – [Reduce resolution] mithat | processing:video_capture [2013/08/23 04:10] (current) – [Video capture examples] mithat | ||
---|---|---|---|
Line 1: | Line 1: | ||
====== Video capture examples ====== | ====== Video capture examples ====== | ||
- | Examples | + | The examples |
===== List available capture devices ===== | ===== List available capture devices ===== | ||
- | The following sketch will print out a list of available capture devices to the console. You'll need at least one entry to do video capture. On Linux (the only platform I tested) different resolutions show up as different devices. | + | You'll need at least one available device to do video capture. |
<file java list_capture_devices.pde> | <file java list_capture_devices.pde> | ||
Line 31: | Line 31: | ||
===== Show video ===== | ===== Show video ===== | ||
+ | There are two ways you can set up video capture: by requesting capture parameters or specifying the device. | ||
+ | |||
==== By requesting capture parameters ==== | ==== By requesting capture parameters ==== | ||
<file java show_capture_by_parameters.pde> | <file java show_capture_by_parameters.pde> | ||
Line 66: | Line 68: | ||
==== By specifying device ==== | ==== By specifying device ==== | ||
- | Note that Processing will crop the capture to the canvas size if it doesn' | + | Processing will crop the capture to the canvas size if it doesn' |
<file java show_capture_by_devnum.pde> | <file java show_capture_by_devnum.pde> | ||
Line 112: | Line 114: | ||
</ | </ | ||
- | <WRAP center round info 80%> | + | |
+ | <WRAP center round tip 80%> | ||
===== Capture is a PImage ===== | ===== Capture is a PImage ===== | ||
- | Something the documentation on the '' | + | Something the [[http:// |
<code java> | <code java> | ||
- | This means all the methods and fields available to '' | + | This means all the methods and fields available to PImage objects should also be available to Capture objects. |
</ | </ | ||
Line 262: | Line 265: | ||
</ | </ | ||
+ | You can render grayscale instead of color above by replacing the line <code java> | ||
+ | |||
+ | |||
+ | ===== Brightest pixel ===== | ||
+ | The following example takes the reduced resolution camera feed above and draws a colored circle inside the brightest pixel found. If there is more than one pixel with the same brightness, then it shows the first one (scanning from top-left to bottom-right). It also adds some code to indicate frame timing. | ||
+ | |||
+ | <file java capture_brightest_pixel.pde> | ||
+ | /** Reduce resolution of captured image and indicate brightest pixel. | ||
+ | * @author Mithat Konar | ||
+ | */ | ||
+ | |||
+ | import processing.video.*; | ||
+ | |||
+ | // === Program constants === // | ||
+ | // Rendering parameters: | ||
+ | // number of cells the rendered image should be in each direction: | ||
+ | final int REDUCED_WIDTH = 32; | ||
+ | final int REDUCED_HEIGHT = 24; | ||
+ | |||
+ | // Canvas parameters: | ||
+ | // number of times you want REDUCED image blown up: | ||
+ | final int OUTPUT_SCALE = 20; | ||
+ | // frame rate of rendered output: | ||
+ | final int CANVAS_FPS = 6; // should divide evenly into CAM_FPS | ||
+ | // to avoid jitter. | ||
+ | |||
+ | // Video capture parameters | ||
+ | // (adjust as needed for your platform' | ||
+ | final int CAM_WIDTH = 320; | ||
+ | final int CAM_HEIGHT = 240; | ||
+ | final int CAM_FPS = 30; | ||
+ | |||
+ | // === Global variables ===// | ||
+ | Capture cam; // The video capture device. | ||
+ | PImage img; // Buffer image. | ||
+ | |||
+ | int blink_state = 0; | ||
+ | |||
+ | // === GO! === // | ||
+ | void setup() { | ||
+ | frameRate(CANVAS_FPS); | ||
+ | size(REDUCED_WIDTH*OUTPUT_SCALE, | ||
+ | ellipseMode(CORNER); | ||
+ | |||
+ | if (Capture.list().length == 0) { | ||
+ | println(" | ||
+ | exit(); | ||
+ | } | ||
+ | |||
+ | // Instantiate a buffer image used for subsampling, | ||
+ | img = createImage(REDUCED_WIDTH, | ||
+ | |||
+ | // Instantiate a new Capture object, requesting the specs: | ||
+ | cam = new Capture(this, | ||
+ | cam.start(); | ||
+ | } | ||
+ | |||
+ | void draw() { | ||
+ | int brightestCol = 0; | ||
+ | int brightestRow = 0; | ||
+ | float brightestIntensity = 0.0; | ||
+ | | ||
+ | // Grab a frame | ||
+ | if (cam.available() == true) { | ||
+ | cam.read(); | ||
+ | } | ||
+ | | ||
+ | // We are using a buffer img because | ||
+ | // cam.resize(REDUCED_WIDTH, | ||
+ | // doesn' | ||
+ | img.copy(cam, | ||
+ | img.loadPixels(); | ||
+ | | ||
+ | // For each column in img: | ||
+ | for (int col = 0; col < REDUCED_WIDTH; | ||
+ | // For each row in img: | ||
+ | for (int row = 0; row < REDUCED_HEIGHT; | ||
+ | // Draw the pixel intensity. | ||
+ | float pixelIntensity = brightness(img.pixels[col + row*img.width]); | ||
+ | fill(pixelIntensity); | ||
+ | stroke(pixelIntensity); | ||
+ | rect(col*OUTPUT_SCALE, | ||
+ | | ||
+ | // Determine whether this is the brightest pixel in this frame. | ||
+ | if (pixelIntensity > brightestIntensity) | ||
+ | { | ||
+ | brightestIntensity = pixelIntensity; | ||
+ | brightestCol = col; | ||
+ | brightestRow = row; | ||
+ | } | ||
+ | } | ||
+ | } | ||
+ | |||
+ | // Highlight brightest pixel. | ||
+ | fill(# | ||
+ | stroke(# | ||
+ | strokeWeight(2); | ||
+ | ellipse(brightestCol*OUTPUT_SCALE, | ||
+ | | ||
+ | | ||
+ | // Frame timer | ||
+ | fill(# | ||
+ | stroke(# | ||
+ | strokeWeight(1); | ||
+ | blink_state = ++blink_state % CANVAS_FPS; | ||
+ | rect(0, 0, blink_state*OUTPUT_SCALE, | ||
+ | }</ |
processing/video_capture.1365792528.txt.gz · Last modified: 2013/04/12 18:48 by mithat