For this recitation I used an Arduino to control the image shown in Processing.
Here is the demo:
Arduino code:
// IMA NYU Shanghai
// Interaction Lab
// This code sends one value from Arduino to Processing
// Open the serial link to Processing at 9600 baud.
void setup() {
  const long BAUD_RATE = 9600;  // must match the rate used on the Processing side
  Serial.begin(BAUD_RATE);
}
// Sample the potentiometer on A0, scale the 10-bit reading (0-1023)
// down to a single byte (0-255), and send it to Processing.
void loop() {
  int reading = analogRead(A0) >> 2;  // >> 2 == / 4: 10-bit -> 8-bit
  Serial.write(reading);
  // A short pause keeps the serial stream from flooding Processing
  // and causing latency on the receiving end.
  delay(10);
}
Processing code:
import processing.video.*;
Capture myCam;
import processing.serial.*;
Serial myPort;
int valueFromArduino;
//PImage myImg;
int size;//we can change the size
// Open the camera and the serial port the Arduino is attached to.
void setup() {
  size(480, 300);
  // myImg = loadImage("1.jpg");  // unused — a still image was an earlier option
  myCam = new Capture(this, 480, 300);
  myCam.start();
  printArray(Serial.list());  // print all ports so the index below can be checked
  // NOTE(review): index 5 is machine-specific — confirm against the printed list.
  myPort = new Serial(this, Serial.list()[5], 9600);
  myPort.clear();
}
// Redraw the camera image as a grid of rotated ellipses whose cell size
// and orientation follow the latest potentiometer byte from the Arduino.
void draw() {
  // Pull in the newest camera frame when one is ready.
  if (myCam.available()) {
    myCam.read();
  }

  background(0);
  noStroke();
  myCam.loadPixels();

  // Drain the serial buffer, keeping only the most recent byte.
  while (myPort.available() > 0) {
    valueFromArduino = myPort.read();
  }
  println(valueFromArduino);

  // Low sensor readings -> coarse grid (50 px cells); otherwise fine (10 px).
  size = (valueFromArduino < 100) ? 50 : 10;

  // Screen diagonal, used to normalize distances below (loop-invariant).
  float diagonal = sqrt(width * width + height * height);

  for (int col = 0; col < myCam.width; col += size) {
    for (int row = 0; row < myCam.height; row += size) {
      fill(myCam.pixels[row * myCam.width + col]);
      // Distance from this cell to the sensor-driven anchor point
      // (valueFromArduino / 2 is intentional integer division).
      float d = dist(col, row, valueFromArduino, valueFromArduino / 2);
      d = map(d, 0, diagonal, 1, size * 2);
      float angle = map(d, 1, size * 2, 0, PI);
      pushMatrix();
      translate(col, row);
      rotate(angle);
      ellipse(0, 0, 3 * d, d);
      popMatrix();
    }
  }
}
The technology used in my project is, I think, very interactive, because the visuals we see on the screen are controlled by a potentiometer that is turned by the user.