processing:video_capture
Differences
This shows you the differences between two versions of the page.
Both sides previous revisionPrevious revisionNext revision | Previous revision | ||
processing:video_capture [2013/04/12 00:16] – mithat | processing:video_capture [2013/08/23 04:10] (current) – [Video capture examples] mithat | ||
---|---|---|---|
Line 1: | Line 1: | ||
- | ====== Video Capture Examples | + | ====== Video capture examples |
- | The following have been adapted | + | The examples below lean heavily on documentation and code from the [[http:// |
===== List available capture devices ===== | ===== List available capture devices ===== | ||
+ | You'll need at least one available device to do video capture. The following sketch will print out a list of available capture devices to the console. On Linux (the only platform I tested) different resolutions show up as different devices. | ||
+ | |||
<file java list_capture_devices.pde> | <file java list_capture_devices.pde> | ||
/** Print out a list of available capture (i.e., video) devices | /** Print out a list of available capture (i.e., video) devices | ||
Line 29: | Line 31: | ||
===== Show video ===== | ===== Show video ===== | ||
+ | There are two ways you can set up video capture: by requesting capture parameters or specifying the device. | ||
- | By requesting capture parameters: | + | ==== By requesting capture parameters |
<file java show_capture_by_parameters.pde> | <file java show_capture_by_parameters.pde> | ||
/** Show video using requested capture parameters | /** Show video using requested capture parameters | ||
Line 52: | Line 54: | ||
} | } | ||
- | // instantiate | + | // Instantiate |
cam = new Capture(this, | cam = new Capture(this, | ||
cam.start(); | cam.start(); | ||
Line 65: | Line 67: | ||
</ | </ | ||
+ | ==== By specifying device ==== | ||
+ | Processing will crop the capture to the canvas size if it doesn' | ||
+ | |||
+ | <file java show_capture_by_devnum.pde> | ||
+ | /** Show video using specified capture device | ||
+ | * @author Mithat Konar | ||
+ | */ | ||
+ | |||
+ | import processing.video.*; | ||
+ | |||
+ | Capture cam; | ||
+ | final int CANVAS_WIDTH = 320; | ||
+ | final int CANVAS_HEIGHT = 240; | ||
+ | final int camnum = 55; // Pick a camnum that makes sense given the canvas size! | ||
+ | |||
+ | void setup() { | ||
+ | size(CANVAS_WIDTH, | ||
+ | |||
+ | String[] camera_list = Capture.list(); | ||
+ | |||
+ | if (camera_list.length == 0) { | ||
+ | println(" | ||
+ | exit(); | ||
+ | } | ||
+ | |||
+ | println(" | ||
+ | for (int i = 0; i < camera_list.length; | ||
+ | println(" | ||
+ | } | ||
+ | |||
+ | println(); | ||
+ | println(" | ||
+ | |||
+ | // Instantiate a Capture object by specifying the device name stored | ||
+ | // in camera_list array: | ||
+ | cam = new Capture(this, | ||
+ | cam.start(); | ||
+ | } | ||
+ | |||
+ | void draw() { | ||
+ | if (cam.available() == true) { | ||
+ | cam.read(); | ||
+ | } | ||
+ | image(cam, 0, 0); | ||
+ | } | ||
+ | </ | ||
+ | |||
+ | |||
+ | <WRAP center round tip 80%> | ||
+ | ===== Capture is a PImage ===== | ||
+ | |||
+ | Something the [[http:// | ||
+ | |||
+ | <code java> | ||
+ | |||
+ | This means all the methods and fields available to PImage objects should also be available to Capture objects. | ||
+ | </ | ||
+ | |||
+ | ===== Reduce resolution ===== | ||
+ | The following example will reduce the resolution of the captured video in both space and time. In addition, it adds a filter that renders the output in black and white. (This might be useful for determining roughly the minimum resolution required for useful processed video results.) | ||
+ | |||
+ | <file java capture_reduce_res.pde> | ||
+ | /** Reduce resolution of captured image. | ||
+ | * @author Mithat Konar | ||
+ | */ | ||
+ | |||
+ | import processing.video.*; | ||
+ | |||
+ | Capture cam; // The video capture device. | ||
+ | PImage img; // Buffer image. | ||
+ | |||
+ | // Canvas parameters | ||
+ | final int CANVAS_WIDTH = 320; | ||
+ | final int CANVAS_HEIGHT = 240; | ||
+ | final int CANVAS_FPS = 4; | ||
+ | |||
+ | // Video capture parameters (adjust as needed for your | ||
+ | // platform' | ||
+ | final int CAM_WIDTH = 320; | ||
+ | final int CAM_HEIGHT = 240; | ||
+ | final int CAM_FPS = 15; | ||
+ | |||
+ | // Rendering parameters: number of cells the rendered | ||
+ | // image should be in each direction: | ||
+ | final int REDUCED_WIDTH = 128; | ||
+ | final int REDUCED_HEIGHT = 96; | ||
+ | |||
+ | void setup() { | ||
+ | frameRate(CANVAS_FPS); | ||
+ | size(CANVAS_WIDTH, | ||
+ | |||
+ | if (Capture.list().length == 0) { | ||
+ | println(" | ||
+ | exit(); | ||
+ | } | ||
+ | |||
+ | // Instantiate a buffer image used for subsampling, | ||
+ | img = createImage(REDUCED_WIDTH, | ||
+ | |||
+ | // Instantiate a new Capture object, requesting the specs: | ||
+ | cam = new Capture(this, | ||
+ | cam.start(); | ||
+ | } | ||
+ | |||
+ | void draw() { | ||
+ | // Grab a frame | ||
+ | if (cam.available() == true) { | ||
+ | cam.read(); | ||
+ | } | ||
+ | |||
+ | // The following should work but doesn' | ||
+ | // cam.resize(REDUCED_WIDTH, | ||
+ | |||
+ | // So we use an interim buffer image instead: | ||
+ | img.copy(cam, | ||
+ | |||
+ | // And draw the image (full canvas): | ||
+ | image(img, 0, 0, width, height); | ||
+ | filter(GRAY); | ||
+ | } | ||
+ | </ | ||
+ | |||
+ | This version walks through each pixel in the reduced image and renders its value as an NxN block. (N is called OUTPUT_SCALE in the code below.) | ||
+ | |||
+ | <file java capture_reduce_res-2.pde> | ||
+ | /** Reduce resolution of captured image. Display results in blocks. | ||
+ | * @author Mithat Konar | ||
+ | */ | ||
+ | |||
+ | import processing.video.*; | ||
+ | |||
+ | // === Program constants === // | ||
+ | // Rendering parameters: | ||
+ | // number of cells the rendered image should be in each direction: | ||
+ | final int REDUCED_WIDTH = 32; | ||
+ | final int REDUCED_HEIGHT = 24; | ||
+ | |||
+ | // Canvas parameters: | ||
+ | // number of times you want REDUCED image blown up: | ||
+ | final int OUTPUT_SCALE = 10; | ||
+ | // frame rate of rendered output: | ||
+ | final int CANVAS_FPS = 8; | ||
+ | |||
+ | // Video capture parameters | ||
+ | // (adjust as needed for your platform' | ||
+ | final int CAM_WIDTH = 320; | ||
+ | final int CAM_HEIGHT = 240; | ||
+ | final int CAM_FPS = 15; | ||
+ | |||
+ | // === Global variables ===// | ||
+ | Capture cam; // The video capture device. | ||
+ | PImage img; // Buffer image. | ||
+ | |||
+ | // === GO! === // | ||
+ | void setup() { | ||
+ | frameRate(CANVAS_FPS); | ||
+ | size(REDUCED_WIDTH*OUTPUT_SCALE, | ||
+ | |||
+ | if (Capture.list().length == 0) { | ||
+ | println(" | ||
+ | exit(); | ||
+ | } | ||
+ | |||
+ | // Instantiate a buffer image used for subsampling, | ||
+ | img = createImage(REDUCED_WIDTH, | ||
+ | |||
+ | // Instantiate a new Capture object, requesting the specs: | ||
+ | cam = new Capture(this, | ||
+ | cam.start(); | ||
+ | } | ||
+ | |||
+ | void draw() { | ||
+ | // Grab a frame | ||
+ | if (cam.available() == true) { | ||
+ | cam.read(); | ||
+ | } | ||
+ | |||
+ | // Using a buffer because | ||
+ | // cam.resize(REDUCED_WIDTH, | ||
+ | // doesn' | ||
+ | img.copy(cam, | ||
+ | img.loadPixels(); | ||
+ | |||
+ | // For each column in img: | ||
+ | for (int col = 0; col < REDUCED_WIDTH; | ||
+ | // For each row in img: | ||
+ | for (int row = 0; row < REDUCED_HEIGHT; | ||
+ | // Get color from pixel at col, row | ||
+ | color c = img.pixels[col + row*img.width]; | ||
+ | fill(c); | ||
+ | stroke(c); | ||
+ | rect(col*OUTPUT_SCALE, | ||
+ | } | ||
+ | } | ||
+ | } | ||
+ | |||
+ | </ | ||
+ | |||
+ | You can render grayscale instead of color above by replacing the line <code java> | ||
+ | |||
+ | |||
+ | ===== Brightest pixel ===== | ||
+ | The following example takes the reduced resolution camera feed above and draws a colored circle inside the brightest pixel found. If there is more than one pixel with the same brightness, then it shows the first one (scanning from top-left to bottom-right). It also adds some code to indicate frame timing. | ||
+ | |||
+ | <file java capture_brightest_pixel.pde> | ||
+ | /** Reduce resolution of captured image and indicate brightest pixel. | ||
+ | * @author Mithat Konar | ||
+ | */ | ||
+ | |||
+ | import processing.video.*; | ||
+ | |||
+ | // === Program constants === // | ||
+ | // Rendering parameters: | ||
+ | // number of cells the rendered image should be in each direction: | ||
+ | final int REDUCED_WIDTH = 32; | ||
+ | final int REDUCED_HEIGHT = 24; | ||
+ | |||
+ | // Canvas parameters: | ||
+ | // number of times you want REDUCED image blown up: | ||
+ | final int OUTPUT_SCALE = 20; | ||
+ | // frame rate of rendered output: | ||
+ | final int CANVAS_FPS = 6; // should divide evenly into CAM_FPS | ||
+ | // to avoid jitter. | ||
+ | |||
+ | // Video capture parameters | ||
+ | // (adjust as needed for your platform' | ||
+ | final int CAM_WIDTH = 320; | ||
+ | final int CAM_HEIGHT = 240; | ||
+ | final int CAM_FPS = 30; | ||
+ | |||
+ | // === Global variables ===// | ||
+ | Capture cam; // The video capture device. | ||
+ | PImage img; // Buffer image. | ||
+ | |||
+ | int blink_state = 0; | ||
+ | |||
+ | // === GO! === // | ||
+ | void setup() { | ||
+ | frameRate(CANVAS_FPS); | ||
+ | size(REDUCED_WIDTH*OUTPUT_SCALE, | ||
+ | ellipseMode(CORNER); | ||
+ | |||
+ | if (Capture.list().length == 0) { | ||
+ | println(" | ||
+ | exit(); | ||
+ | } | ||
+ | |||
+ | // Instantiate a buffer image used for subsampling, | ||
+ | img = createImage(REDUCED_WIDTH, | ||
+ | |||
+ | // Instantiate a new Capture object, requesting the specs: | ||
+ | cam = new Capture(this, | ||
+ | cam.start(); | ||
+ | } | ||
+ | |||
+ | void draw() { | ||
+ | int brightestCol = 0; | ||
+ | int brightestRow = 0; | ||
+ | float bightestIntensity = 0.0; | ||
+ | | ||
+ | // Grab a frame | ||
+ | if (cam.available() == true) { | ||
+ | cam.read(); | ||
+ | } | ||
+ | |||
+ | // We are using a buffer img because | ||
+ | // cam.resize(REDUCED_WIDTH, | ||
+ | // doesn' | ||
+ | img.copy(cam, | ||
+ | img.loadPixels(); | ||
+ | |||
+ | // For each column in img: | ||
+ | for (int col = 0; col < REDUCED_WIDTH; | ||
+ | // For each row in img: | ||
+ | for (int row = 0; row < REDUCED_HEIGHT; | ||
+ | // Draw the pixel intensity. | ||
+ | float pixelIntensity = brightness(img.pixels[col + row*img.width]); | ||
+ | fill(pixelIntensity); | ||
+ | stroke(pixelIntensity); | ||
+ | rect(col*OUTPUT_SCALE, | ||
+ | |||
+ | // Determine whether this is the brightest pixel in this frame. | ||
+ | if (pixelIntensity > bightestIntensity) | ||
+ | { | ||
+ | bightestIntensity = pixelIntensity; | ||
+ | brightestCol = col; | ||
+ | brightestRow = row; | ||
+ | } | ||
+ | } | ||
+ | } | ||
+ | |||
+ | // Highlight brightest pixel. | ||
+ | fill(# | ||
+ | stroke(# | ||
+ | strokeWeight(2); | ||
+ | ellipse(brightestCol*OUTPUT_SCALE, | ||
+ | | ||
+ | | ||
+ | // Frame timer | ||
+ | fill(# | ||
+ | stroke(# | ||
+ | strokeWeight(1); | ||
+ | blink_state = ++blink_state % CANVAS_FPS; | ||
+ | rect(0, 0, blink_state*OUTPUT_SCALE, | ||
+ | }</ |
processing/video_capture.1365725785.txt.gz · Last modified: 2013/04/12 00:16 by mithat