m8ta
use https for features.
text: sort by
tags: modified
type: chronology

{644} hide/ /
Given proper implementation of fixed-point sine and cosine, we present the C portion of our microstepping controller below, hopefully commented to a sufficient extent to make it intelligible and to document the logic behind our design.

#include <msp430x54x.h>
#define INCR 1
#define PI2 (12868) //we use an 11-bit fixed-point representation of motor angles.
	// 2*pi*2^11 = 12867.96, rounded up to 12868.
	// NOTE(review): isin/icos accept 0..12866 and return 0 at exactly
	// 12867/12868; the wrap logic in the timer ISR makes this a one-tick
	// glitch at most -- confirm it is acceptable.
#define TIMER_SCL	128
#define SHUTTER_CLOSED 	0 //defines used in the state-machine control of the shutter. 
#define SHUTTER_OPEN	1
#define SHUTTER_SMALL	2

	//Start of Stepper Motor Program
	//ME270 Project

short theta1 = 0; //the MSP430 is a 16-bit processor. 
short theta2 = 0; //hence, most datatypes should be 'short' for efficiency.
short theta1_v = 0; //we cannot control the position of the stepper motors directly, 
short theta2_v = 0; // since we only have 3 bits of control from the parallel port. 
// also, direct position control would cause the stepper to miss steps; 
// smooth position change and limited acceleration is guaranteed by controlling the velocity. 
// theta1/theta2 are base-11 fixed-point angles (0..PI2); theta1_v/theta2_v are
// the per-tick increments integrated in the timer ISR.

short shutter_state = 0; // none of the steppers have encoders, 
	// so we keep track of the shutter position here.  
short shutter_cmd ;  //this is set in the ISR (interrupt service routine) 
	// and read, and acted upon, in the main loop. 

short qsin(short i){
	// Quarter-wave sine for base-11 fixed-point angles: i in [0, 3217)
	// represents [0, pi/2).  Returns sin(i) rescaled to 7 bits
	// (0..TIMER_SCL) for the PWM compare registers.
	short x3, x5, r;
	// x^3 and x^5 via the assembly fixed-point multiply helper.
	x3 = mply_11(i, i);
	x3 = mply_11(x3, i);
	x5 = mply_11(x3, i);
	x5 = mply_11(x5, i);
	// Taylor series sin(x) = x - x^3/3! + x^5/5!, with the divisions
	// replaced by shifts: 1/6 ~ 1/8 + 1/32 + 1/128, and 1/120 ~ 1/128.
	r = i - ((x3 >> 3) + (x3 >> 5) + (x3 >> 7)) + (x5 >> 7);
	// Convert base-11 down to base-7 and clamp to the timer scale.
	r = r >> 4;
	if(r > TIMER_SCL)
		r = TIMER_SCL;
	return r;
}

// this is an even more simplified version of sin - 
// made in an attempt to make the microstepping smoother. 
// it turned out to not matter very much.  to get very smooth stepping , 
// may have to develop an inverse model of the nonlinear stepper motors 
// ** quantitatively **
short qsin2(short i){
	// Cruder quarter-wave sine: keeps only the x - x^3/8 terms of the
	// Taylor series.  Written while trying to smooth the microstepping;
	// in practice it made little difference -- a quantitative inverse
	// model of the nonlinear steppers would likely be needed instead.
	short x3, r;
	x3 = mply_11(i, i);
	x3 = mply_11(x3, i);
	// sin(x) ~ x - x^3/8 (1/3! rounded to a single power of two).
	r = i - (x3 >> 3);
	// base-11 -> base-7, clamped to the PWM timer scale.
	r = r >> 4;
	if(r > TIMER_SCL)
		r = TIMER_SCL;
	return r;
}

short isin(short i){
	// Full-circle sine: i is an angle in [0, 2*pi) as base-11 fixed
	// point (0..12866).  Reduce to the first quadrant and apply the
	// quarter-wave approximation with the proper sign; out-of-range
	// inputs yield 0.
	if(i < 0) return 0;
	if(i < 3217) return qsin(i);
	if(i < 6434) return qsin(6434 - i);
	if(i < 9651) return -1*qsin(i - 6434);
	if(i < 12867) return -1*qsin(12867 - i);
	return 0;
}

short icos(short i){
	// Full-circle cosine, same conventions as isin(): quarter-wave
	// reduction of a base-11 angle in [0, 2*pi), phase-shifted by pi/2.
	if(i < 0) return 0;
	if(i < 3217) return qsin(3217 - i);
	if(i < 6434) return -1*qsin(i - 3217);
	if(i < 9651) return -1*qsin(9651 - i);
	if(i < 12867) return qsin(i - 9651);
	return 0;
}

//this interrupt is triggered by the parallel port. 
//because we only have 3 lines and need 8 commands, 
//after triggering (0 to 1 transition on PORT2.0 ), 
//the control program on the PC will either hold the pin up 
// (indicating a velocity step command) or drop it (indicating shutter/stop cmd). 

#pragma vector=PORT2_VECTOR
__interrupt void port2_ISR (void)
{
	// Parallel-port command interrupt, triggered by a 0->1 edge on P2.0.
	// The PC encodes one of 8 commands on the three lines P2.0-P2.2.
	short k; 
	// Toggle P9.2 eight times; the pin ends in its original state.
	// NOTE(review): presumably a scope/debug marker or crude settling
	// delay before sampling P2IN -- confirm intent.
	for(k=0; k<8; k++){
		P9OUT ^= 0x4;	
	}
	// Decode the 3-bit command.  Odd codes (trigger pin held high) are
	// velocity steps; even codes (trigger dropped) are shutter/stop.
	switch(P2IN & 0x7){
		case 1: theta1_v += INCR;  break; 
		case 3: theta1_v -= INCR;  break; 
		case 5: theta2_v += INCR;  break; 
		case 7: theta2_v -= INCR;  break; 
		case 0: shutter_cmd = SHUTTER_CLOSED; break; 
		case 2: shutter_cmd = SHUTTER_SMALL; break; 
		case 6: shutter_cmd = SHUTTER_OPEN; break; 
		case 4: theta1_v = 0; theta2_v = 0; break;
	}
	P2IFG = 0; //clear the interrupt flag so we can trigger again. 
}

#pragma vector=TIMER1_A0_VECTOR 
__interrupt void timera1_ISR (void)
{
	// Empty handler for the TA1 CCR0 vector.  It must exist:
	return; //if this vector is not here and the interrupt is enabled, then the proc will crash! 
}
// have to integrate the velocity at a consistent rate, 
// hence we pushed integration as well as the sine/cosine computation 
// into this timer interrupt. 
#pragma vector=TIMER1_A1_VECTOR 
__interrupt void timera11_ISR (void)
{
	// Fixed-rate update tick: integrate commanded velocities into motor
	// angles, then refresh the four PWM duty cycles with sine/cosine of
	// the new angles (vector microstepping of the two mirror motors).
	short ps, pc; 
	P1OUT ^= 0xFF; //toggle P1 to indicate the update rate. (the led is there)
	TA1CTL = 0x0004; //reset counter
	TA1CTL = 0x0112; //turn back in interrupts.	

	// integrate velocity -> position.
	theta1 += theta1_v; 
	theta2 += theta2_v;
	
	// wrap both angles back into [0, 2*pi) (base-11 fixed point).
	if(theta1 > PI2) theta1 -= PI2; 
	if(theta1 < 0) theta1 += PI2; 

	if(theta2 > PI2) theta2 -= PI2; 
	if(theta2 < 0) theta2 += PI2; 

	// offset by TIMER_SCL so a zero phase current maps to a 50% duty
	// cycle (isin/icos return -TIMER_SCL..+TIMER_SCL).
	ps = isin(theta1)+TIMER_SCL;
	pc = icos(theta1)+TIMER_SCL;

	TA0CCR1 = ps; //update the counter (PWM output) registers. 
	TA0CCR4 = pc;

	ps = isin(theta2)+TIMER_SCL;
	pc = icos(theta2)+TIMER_SCL;
	
	TA0CCR2 = ps; 
	TA0CCR3 = pc;
	P1OUT ^= 0xFF; //toggle P1 to indicate the update rate. (the led is there)
	return;
}

//delay is used in moving the shutter. 
// too short, and the stepper motor controlling the shutter will skip steps! 
//delay is used in moving the shutter. 
// too short, and the stepper motor controlling the shutter will skip steps! 
// 'j' is volatile so the compiler cannot delete the do-nothing loop body
// (and with it the entire delay) at higher optimization levels -- the
// shutter timing depends on this loop actually executing.
void delay(short time){
	short k; 
	volatile short j = 0; 
	for(k=0; k<time; k++){
		j++; 
		j++; 
	}
}

// Longer busy-wait delay: 'time' iterations of a 100-count inner loop.
// 'f' is volatile so the optimizer cannot remove the inner loop and
// collapse the delay to nothing.
void delay_long(short time){
	short k,j; 
	volatile short f; 
	for(k=0; k<time; k++){
		f = 0; 
		for(j=0; j<100; j++){
			f++; 	
		}
	}
}

// ideally, we would ramp the shutter velocity up and down to maximize speed 
// and minimize time - constant velocity works fine and fast. 
#define SHUTDLY	1600
void shutter_ccw(void){
	// One full step counter-clockwise on the shutter stepper: walk the
	// two phase bits of P9 through 01 -> 11 -> 10 -> 00, pausing SHUTDLY
	// between states so the motor does not skip steps.
	P9OUT = 0x1; 
	delay(SHUTDLY); 
	P9OUT = 0x3; 
	delay(SHUTDLY); 
	P9OUT = 0x2; 
	delay(SHUTDLY); 
	P9OUT = 0x0; 
	delay(SHUTDLY); 	
}

void shutter_cw(void){
	// One full step clockwise: the shutter_ccw() phase sequence in
	// reverse (10 -> 11 -> 01 -> 00), same SHUTDLY pacing.
	P9OUT = 0x2; 
	delay(SHUTDLY); 
	P9OUT = 0x3; 
	delay(SHUTDLY); 
	P9OUT = 0x1; 
	delay(SHUTDLY); 
	P9OUT = 0x0; 
	delay(SHUTDLY); 	
}

void shutter_open(void){
	// Drive the shutter to the fully-open position and record the new
	// state.  The three shutter positions are exactly 5 full steps
	// apart: 5 steps ccw from closed, or 5 steps cw back from small.
	short step;
	switch(shutter_state){
		case SHUTTER_CLOSED:
			for(step=0; step<5; step++)
				shutter_ccw();
			break;
		case SHUTTER_SMALL:
			for(step=0; step<5; step++)
				shutter_cw();
			break;
	}
	shutter_state = SHUTTER_OPEN;
}
void shutter_close(void){
	// Drive the shutter fully closed: 5 steps cw from open, or 10 steps
	// cw from the small aperture (positions are 5 steps apart).
	short step;
	switch(shutter_state){
		case SHUTTER_OPEN:
			for(step=0; step<5; step++)
				shutter_cw();
			break;
		case SHUTTER_SMALL:
			for(step=0; step<10; step++)
				shutter_cw();
			break;
	}
	shutter_state = SHUTTER_CLOSED;
}
void shutter_small(void){
	// Move the shutter to the small-aperture position: 5 steps ccw from
	// open, or 10 steps ccw from closed.
	short step;
	switch(shutter_state){
		case SHUTTER_OPEN:
			for(step=0; step<5; step++)
				shutter_ccw();
			break;
		case SHUTTER_CLOSED:
			for(step=0; step<10; step++)
				shutter_ccw();
			break;
	}
	shutter_state = SHUTTER_SMALL;
}

void main (void){
	//SR = 0x02;
	// Firmware entry: configure clocks, the PWM timer (TA0), the update
	// timer (TA1), GPIO, and the parallel-port interrupt; home the
	// shutter; then loop forever servicing shutter commands from the ISR.
	short t1, t2, ps, pc; // NOTE(review): declared but never used below.
	short k; 
	WDTCTL = WDTPW + WDTHOLD; //stop the watchdog timer.
	// UCSCTL = Universal clock system control (registers). 
	// sets the core clock of the device. 
	_bis_SR_register(SCG0);
	//SR = SR | 0x40 ;
	// setup the digitally controlled oscillator. 
	UCSCTL0 = 0x0700; //DCO = 5, MOD = 0
	UCSCTL1 = 0x0060; //DCORSEL = 3, middle freq. 
	UCSCTL2 = 0x101F; //FLL (frequency locked loop); doesnt matter here.
	UCSCTL4 = 0x0333; //select DCOCLK for all clock sources 
	UCSCTL0 = 0x0900; //DCO = 9, MOD = 0
		// DCO = internal high-frequency oscillator). 
	//UCSCTL5 = 0x0000; 

	// setup timer A (for controlling PWM outputs)
	TA0CCR0 = TIMER_SCL*2+1; //Timer A end pulse
	TA0CCR1 = TIMER_SCL; //Timer A start pulse (50% duty = zero phase current)
	TA0CCR2 = TIMER_SCL; //Timer A start pulse
	TA0CCR3 = TIMER_SCL; //Timer A start pulse
	TA0CCR4 = TIMER_SCL; //Timer A start pulse
	TA0CTL = 0x0110; //TASSEL = ACLK; input divider=1; MCx = count up mode; 
	TA0CCTL0 = 0x0040; //sets the mode of the output: here=2, toggle/reset.
		// (produces pulses at 8Khz). 
	TA0CCTL1 = 0x00C0; //output mode: toggle/set. 
	TA0CCTL2 = 0x00C0; // same
	TA0CCTL3 = 0x00C0; // same
	TA0CCTL4 = 0x00C0; // same

	//setup timer B (for controlling theta & velocity updates). 
	TA1CCR0 = 10000; //Timer A end pulse
	TA1CCR1 = 5000; //Timer A start pulse
	TA1CCR2 = 5000; //Timer A start pulse	
	TA1CTL = 0x0004;
	TA1CTL = 0x0112; //TASSEL = ACLK; input divider=1; MCx = count up mode; 
	TA1CCTL0 = 0x0050; //sets the mode of the output: here=2, toggle/reset.
		// (produces pulses at 8Khz). 
	TA1CCTL1 = 0x00C0; //output mode: toggle/set. 
	TA1CCTL2 = 0x00C0; // same
	
	P1DIR = 0x03; //P1.0 and P1.1 to output. (the LED)
	P1SEL = 0x00;
	P1OUT = 0; 
		
	P8DIR = 0xFF; //for some reason Port 1 does not work for the dev board --  
	P8SEL = 0x7F; //hence, we switched over to Port 8, which also is connected 
	// to the timers. 
	// the P8SEL register selects the timer output as opposed to general purpose IO. 
	
	//setup port2 (computer command via parallel port) interrupt. 
	P2DIR = 0; //all inputs. 
	P2IFG = 0;
	P2IE = 0x01; //enable interrupts on P2.0
	P2IES = 0; // low to high edge detect. 
	_bis_SR_register(GIE | SCG0 );  //enable interrupts, dont go to sleep.
	
	P9DIR = 0x07; // shutter stepper phase outputs (see shutter_cw/ccw).
	
	//test the shutter. first move all the way to the limit.
	// since we do not know the initial position.
	for(k=0; k<30; k++){
		shutter_cw(); 
	}
	shutter_ccw();  //step one out so we don't hit the limits later.
	shutter_state = SHUTTER_CLOSED; //this is where the init will leave it
	while(1){
		//now just sit here waiting for a shutter command from the 
		// parallel port ISR.  all motor PWM updates happen in the
		// TA1 timer interrupt; only the (slow) shutter moves run here.
		if(shutter_state != shutter_cmd){
			switch(shutter_cmd){
				case SHUTTER_OPEN: shutter_open(); break; 
				case SHUTTER_CLOSED: shutter_close(); break; 
				case SHUTTER_SMALL: shutter_small(); break;
			} 	
		}
	}
}

We had some problems getting Port 1 to work, hence had to use Port 8 to get the PWM signals out -- see below. Note the shutter does not need to be microstepped and instead can be controlled as per a conventional stepper motor. The pins used are marked in yellow; pin 57 is a simple pulse output.

Note that each PWM signal is generated from the same timer counter, hence they are synchronous (this is of course not necessary). Below are some examples of two phases of one motor output; in real life, of course, the PWM ratio continually changes.

Below is the development board being probed to produce the plots above.

We then used the pinouts of the NXP microcontrollers, intuited last time by probing their on-line function (specifically the auto-calibration sequence upon startup), to wire up a harness for external control of the H-bridges. Below is a picture of that (we removed the original microcontrollers to prevent contention), and the associated wiring labels.

The next task was to make a 5V to 3.2V regulator (the MSP430 only runs at 3.2V, max), nothing major just a LM317. See below.

Finally, everything was wired together. The 3.2 V supply was jumpered, as the MSP430 USB programmer provided its own power. (Amazingly, the ultra low power MSP430 could run on power from the parallel port, too!)

A level translator is needed to properly interface the 3.2V MSP430 to the 5V parallel port - we cannibalized a spare JTAG adapter for this purpose. It was this that limited us to only 3 bits of control.

And that, minus the difficulty we had getting the compiler working properly, is the entirety of the microcontroller part of the project.

{648} hide/ /

{1491}
hide / / print
ref: -0 tags: dLight1 dopamine imaging Tian date: 12-05-2019 17:27 gmt revision:0 [head]

PMID-29853555 Ultrafast neuronal imaging of dopamine dynamics with designed genetically encoded sensors

  • cpGFP based sensor. $\Delta F / F \approx 3$.

{1480}
hide / / print
ref: -0 tags: betzig lattice light sheet date: 09-18-2019 18:32 gmt revision:0 [head]

PMID-25342811 Lattice Light Sheet Microscopy: Imaging Molecules to Embryos at High Spatiotemporal Resolution

{1450}
hide / / print
ref: -2015 tags: conjugate light electron tomography mouse visual cortex fluorescent label UNC cryoembedding date: 03-11-2019 19:37 gmt revision:1 [0] [head]

PMID-25855189 Mapping Synapses by Conjugate Light-Electron Array Tomography

  • Use aligned interleaved immunofluorescence imaging followed by array EM (FESEM). 70nm thick sections.
  • Of IHC, tissue must be dehydrated & embedded in a resin.
  • However, the dehydration disrupts cell membranes and ultrastructural details viewed via EM ...
  • Hence, EM microscopy uses osmium tetroxide to cross-link the lipids.
  • ... Yet that also disrupts / refolds the proteins, making IHC fail.
  • Solution is to dehydrate & embed at cryo temp, -70C, where the lipids do not dissolve. They used Lowicryl HM-20.
  • We show that cryoembedding provides markedly improved ultrastructure while still permitting multiplexed immunohistochemistry.

{1436}
hide / / print
ref: -0 tags: Airy light sheet microscopy attenuation compensation LSM imaging date: 02-19-2019 04:51 gmt revision:1 [0] [head]

Light-sheet microscopy with attenuation-compensated propagation-invariant beams

  • Ah ... beautiful illustration of the airy light sheet concept.
  • In practice, used a LCOS SLM to generate the beam (as .. phase matters!) plus an AOM to scan the beam.
    • Microscope can operate either in SPIM (single plane imaging microscope) or DSLM (digital scanning light sheet microscope),
  • Improves signal-to-background ratio (SBR) and contrast-to-noise ratio (CNR) (not sure why they don't use SNR..?)

{1430}
hide / / print
ref: -2017 tags: calcium imaging seeded iterative demixing light field microscopy mouse cortex hippocampus date: 02-13-2019 22:44 gmt revision:1 [0] [head]

PMID-28650477 Video rate volumetric Ca2+ imaging across cortex using seeded iterative demixing (SID) microscopy

  • Tobias Nöbauer, Oliver Skocek, Alejandro J Pernía-Andrade, Lukas Weilguny, Francisca Martínez Traub, Maxim I Molodtsov & Alipasha Vaziri
  • Cell-scale imaging at video rates of hundreds of GCaMP6 labeled neurons with light-field imaging followed by computationally-efficient deconvolution and iterative demixing based on non-negative factorization in space and time.
  • Utilized a hybrid light-field and 2p microscope, but didn't use the latter to inform the SID algorithm.
  • Algorithm:
    • Remove motion artifacts
    • Time iteration:
      • Compute the standard deviation versus time (subtract mean over time, measure standard deviance)
      • Deconvolve standard deviation image using Richardson-Lucy algo, with non-negativity, sparsity constraints, and a simulated PSF.
      • Yields hotspots of activity, putative neurons.
      • These neuron lcoations are convolved with the PSF, thereby estimating its ballistic image on the LFM.
      • This is converted to a binary mask of pixels which contribute information to the activity of a given neuron, a 'footprint'
        • Form a matrix of these footprints, $S_0$ ($p \times n$; p pixels, n neurons)
      • Also get the corresponding image data $Y$ ($p \times t$, t time)
      • Solve: minimize over $T$ the objective $\| Y - ST \|_2$ subject to $T \geq 0$
        • That is, find a non-negative matrix of temporal components $T$ which predicts data $Y$ from masks $S$.
    • Space iteration:
      • Start with the masks again, $S$; find all sets $O^k$ of spatially overlapping components $s_i$ (e.g. where footprints overlap)
      • Extract the corresponding data columns $t_i$ of $T$ (from the temporal step above) from $O^k$ to yield $T^k$. Each column corresponds to temporal data for the spatial overlap sets. (additively?)
      • Also get the data matrix $Y^k$ that is image data in the overlapping regions in the same way.
      • Minimize over $S^k$ the objective $\| Y^k - S^k T^k \|_2$
      • Subject to $S^k \geq 0$
        • That is, solve over the footprints $S^k$ to best predict the data from the corresponding temporal components $T^k$.
        • They also impose spatial constraints on this non-negative least squares problem (not explained).
    • This process repeats.
    • allegedly 1000x better than existing deconvolution / blind source segmentation algorithms, such as those used in CaImAn

{649}
hide / / print
ref: notes-0 tags: ME270 project light power date: 12-08-2008 19:42 gmt revision:2 [1] [0] [head]

ME 270 Final Project - Optical Power Transfer

The purpose of this project was to develop a means for delivering optical power to the top of a monkey unconstrained within his cage. This power will ultimately be used to recharge the batteries on a small neural telemetry device that we are developing. To deliver the power, we decided to use a moving-mirror DJ light controlled through a microcontroller in turn controlled via a video tracking computer. Hence, our report will be broken into three sections: the light, microcontroller, and video tracking.

{624} hide/ /

Section 1 : Reverse-engineering the light, part selection

Rather than engineering our own articulated spotlight, we elected to buy a moving-mirror DJ light; creating our own light source would simply have taken too much time, and demanded optical experience that we lack. After a brief survey, we bought an Elekralite mm150 (moving mirror, 150W bulb) DJ light, one of the cheapest on the market ($650); despite this, it is very well constructed, as you will see below. The light was originally controlled through the stage-specific DMX bus, but the possibility of directly controlling the light through this was discarded after we learned that the resolution on each axis is only 8 bits (+- 127); given the large range of the pan mirror, this is insufficient for tracking. Hence we decided to reverse engineer the light to determine the best way to directly control the mirror and shutter.
  • Overview, with dichroic color filter wheel and globos removed. We ended up putting these back in, as without them the unit does not initialize properly.
  • Another view with the shell taken off. The white ballast on the bottom powers the 150W HMI metal-halide bulb, along with a stabilization capacitor (to the left of the ballast) and starter (below the capacitor). The starter produces 2kV to initially ionize the gas in the bulb; there is no third starter electrode as in mercury vapor bulbs, as this would be a point of stress in the high-pressure (90atm) fused quartz arc tube. The bulb is on whenever the unit is plugged in, and the light output is controlled via the shutter. The top large transformer (+20V, +12V) is used to drive the display/control unit (left center) and the 6 stepper motors in the lamp, which are:
    • mirror tilt
    • mirror pan
    • shutter
    • globo selection
    • globo rotate
    • colorwheel
    • Detail of the breakout board for the 6 stepper motors, with the colorwheel stepper above it. The stepper motors are two-phase, permanent-magnet rotor types; we're not sure of the steps/revolution yet, though they all seem different. The breakout board also has attachments for the hall-effect sensors to detect orientation of the globo and color wheels.
    • The dichroic mirror color wheel. Regular filters would undoubtedly melt here - a pencil put into the beam of the HMI lamp will light on fire!
    • The globo wheel. Each globo is mounted to a gear which engages a center gear on the same axis as this plate. This center gear is driven by the globo rotate stepper motor.
    • Dichroic IR filter. Note that since it is viewed at an angle, the reflective wavelength goes up, and so you can see a slight red tint in the reflections off this. Removed this, since silicon photovoltaic cells respond to near IR energy.
    • Stepper motor controller. The three chips at the bottom are NXP 87LPC768 OTP microcontroller. Each can control two stepper motors, and each has an internal program for self-calibration using either the hall-effect sensors or hard mechanical limits on motor range. Upper chip is for the mirror (U3). These chips are controlled via one high-speed serial bus from the upper control board (the one with the LED display). The six upper chips, with heat-sinks on them, contain dual H-bridges, probably similar to the Allegro A3955 PWM microstepping motor driver. Between them are two standard 74HC04 hex inverters to drive the opposite leg of the h-bridges (each 87LPC768 has four outputs, two per motor, whereas each motor requires four inputs, two per phase. In this way, you can switch the direction of current flow through each motor phase with a single +20v supply rail.)
    • The Elekralite mm150 without the two control boards, globo wheel, or filter wheel. You can see the lamp, focus adjustment screws and reflector. Below the lamp is a 24V radial fan.
    • The Elektralite powered on, with globo and filter wheel back in place. The lamp is very, very bright!
    • Close-up of filter and globo wheels. The shutter has been removed; it fits on the shaft just above the long hex spacer. Just above that is the mechanical hard stop for the shutter.
    • The pin on the main controller that is used to control the 6 stepper axes. We tried probing it for some time, but could not directly figure out the serial protocol without a good logic analyzer (which is at home), so decided to implement our own microstepping PWM controller, and drive the inverters / H-bridges directly.
    • The shutter, after being used for a good bit. The UV and IR from the lamp (mostly the IR, I think - the shutter gets really hot!) bleaches the black anodization on this bit of aluminum. We've drilled a 1/4" hole in one side to try to get a more tightly-collimated beam.
    • The PWM waveform at the motor for the two phases (the glare at the left is from the light, which was of course on!). The other leads in the motor are simply the inverse of these via the 74HC04. For vector control like this, a 50% on/50% off PWM effectively sets the phase current to zero (the motor has sufficient leakage inductance at the 16kHz PWM frequency to keep di/dt low over multiple PWM periods). Hence, to control the motors we need something like:
      phase1 = 0.5*sin(theta) + 0.5 ; phase2 = 0.5*cos(theta) + 0.5
      , where theta is changed based on the control algorithm, and phase is scaled and used to set the PWM counter compare register. See below.
    • Microstepping control scheme based on vector control of current in the two phases of these stepper motors. As before, current will be regulated via PWM @ 16kHz, as the Elektralite engineers do, as opposed to linear current control. To generate the appropriate 6 PWM signals (mirror pan, mirror tilt, and shutter), we used a MSP430 microcontroller with 3 16-bit counters and 5+ capture-compare registers.
    • We used a MSP430F5xxx development board from TI -- see the next section. The device features sufficient timers and PWM outputs to control the stepper motors in the Elekralite. We will control motion of each using a simple up-down toggling via the parallel port to increment/decrement the motor theta variable. The microcontroller will need a look-up table or algorithm for calculating the sines and cosines of rotor angles.
    • small solar cell from Spark Fun Electronics. When placed in the beam of the light, it is possible to generate 500ma short-circuit current and 5V open-circuit voltage, but not at the same time. In a solar cell composed of a series of junctions, such as this, the total current at full voltage is limited by the minimum current from each of the junctions (they are in series!). Therefore to get maximum current you need very uniform illumination, something that is normally not difficult with the sun, but which will be difficult in this application. To avoid this problem we will try to procure higher efficiency single-junction GaAs concentrator cells, which can be up to ~40% efficient (as compared to this one, which is ~15% efficient).

Section 2: Microcontroller & microstepping

As mentioned before, we chose MSP430F5438 100-pin 16 bit microcontroller because it offers sufficient timers and speed for our problem and because we were both familiar with the architecture. Four 16-bit timers are used to control microstepping mirror tilt and pan, since the stepper motors have two phases. The microcontroller only needs to provide digital signals; current is provided through H-bridge drivers in the control board of the mm-150 - the DIPs with heat sinks below.

Opposite sides of the H-bridge are driven via hex inverters; hence, we only have to supply two PWM signals per motor, one per phase. Setting the PWM duty cycle to 50% will set the motor phase current to zero; by vectoring the duty cycle proportional to the sine and cosine of theta, where theta is the orientation of the motor * number of poles of the stepper, you can control microstepping. That, of course, is simplified; in practice, there are many details to contend with, namely:

  • The MSP430 is a 16 bit integer processor; hence, fractional values must be represented as fixed-point numbers. We chose 11-bit fixed point to optimize resolution and prevent overflow.
  • There is no divide. Division can be accomplished via bit-shifts (each right-shift divides by 2) and addition.
  • There is one 16*16 -> 32 bit multiplier used as a peripheral in the MSP430 architecture, but the standard C library does not access it properly for our fixed-point math.
  • We have to approximate sine and cosine using fixed-point math, since the standard C library Sin and cos each take more than 2500 cycles -- too long when the microcontroller is only running at 12Mhz, and we want to update 4 microstepping phases at ~ 40Khz.

{646} hide/ /

We approximated sine and cosine, needed to vector the stepper motor phase currents, in fixed-point arithmetic first in C on linux - where the results could be plotted in matlab - before converting to MSP430 code. Since the trigonometric functions are repeating, we only need a polynomial approximation of sine from 0 to pi/2. The taylor series for sine is

 sin(x) = x - x^3/3! + x^5/5! - x^7/7! ...
; a quick check in matlab showed that the first three terms are enough to get an accurate approximation in the domain of interest. The MSP430 does not have division, however, so we approximated 1/3! = 1/6 as (1/8 + 1/32 + 1/128) and 1/5! = 1/120 as 1/128; division by powers of two is possible with right bit-shift operations. We chose base 11 (5 bits whole, 11 bits fractional) representation to avoid overflow: if 2^11 -> 1, we need to represent (pi/2)^5 -> 9.5 ; ceil(log_2(9.5)) = 4 (plus one bit for safety). The C program below shows this test.

#include <stdio.h>

char qsin(short i){
	// Quarter-wave sine for base-11 fixed-point angles: i in [0, 3217)
	// represents [0, pi/2).  Returns sin(i) rescaled to 7 bits (0..127).
	unsigned int c3, c5, r;
	// x^3 and x^5, renormalizing to base 11 after each multiply.
	c3 = (i*i) >> 11;
	c3 = (c3*i) >> 11; //max = 7937
	c5 = (c3*i) >> 11;
	c5 = (c5*i) >> 11; // max = 19585
	// Taylor series sin(x) = x - x^3/3! + x^5/5!, divisions replaced by
	// shifts: 1/6 ~ 1/8 + 1/32 + 1/128 and 1/120 ~ 1/128.
	r = (unsigned int)i - ((c3 >> 3) + (c3 >> 5) + (c3 >> 7)) + (c5 >> 7);
	// Convert base-11 down to base-7 and clamp.
	r = r >> 4;
	if(r > 127)
		r = 127;
	return (char)r;
}
//this is tricky, as it involves shifts, x-inversions, and y-inversions. 
//but it all makes sense if you plot out the respective functions. 
//this is tricky, as it involves shifts, x-inversions, and y-inversions. 
//but it all makes sense if you plot out the respective functions. 
char isin(short i){
       // Full-circle sine over base-11 angles (0..12866): reduce to the
       // first quadrant and apply qsin with the proper sign.  Anything
       // outside the accepted range maps to 0.
       if(i < 0) return 0;
       if(i < 3217) return qsin(i);
       if(i < 6434) return qsin(6434 - i);
       if(i < 9651) return -1*qsin(i - 6434);
       if(i < 12867) return -1*qsin(12867 - i);
       return 0;
}
char icos(short i){
       // Full-circle cosine, same conventions as isin(): the quarter-wave
       // reduction shifted by pi/2 (3217 in base-11 fixed point).
       if(i < 0) return 0;
       if(i < 3217) return qsin(3217 - i);
       if(i < 6434) return -1*qsin(i - 3217);
       if(i < 9651) return -1*qsin(9651 - i);
       if(i < 12867) return qsin(i - 9651);
       return 0;
}

int main(void){
	// Sweep one full revolution (0 .. 2*pi in base-11 fixed point) and
	// print angle, radians, and the integer sin/cos for plotting.
	short angle = 0;
	while(angle < 12867){
		printf("%d\t%f\t%d\t%d\n", angle, ((float)angle)/2048.0, (int)(isin(angle)), (int)(icos(angle)));
		angle++;
	}
	return 0;
}

We compiled and ran this program on the command line:

 gcc integer_trig.c 
 ./a.out > test.txt

Then we imported the data into matlab and plotted (Actual double floating-point sine and cosine are plotted on the same axis as thin black and magenta lines, respectively)

Later, we had to change the naive standard implementation of multiply to assembly to properly implement the fixed-point arithmetic - the MSP430's standard library did not implement the 16x16 multiply followed by shift correctly (it only keeps the bottom 16 bits). Note: this assembly function is for use with Code-Composer Studio, available from the Texas Instruments website. It seems that the IAR compiler uses different assembly syntax.

;*******************************************************************************
            .cdecls C,LIST,"msp430x54x.h"  ; Include device header file

;-------------------------------------------------------------------------------
            .text                           ; Program Start
;-------------------------------------------------------------------------------
                       
          ;;.sect "mply_11"
          ;;.asmfunc "mply_11"
                        .global mply_11 ;; this MUST BE PRECEDED BY TABS !!!!!

; short mply_11(short a, short b):
; base-11 fixed-point multiply via the hardware multiplier (MPY/OP2),
; returning (a*b) >> 11.  Interrupts are disabled around the multiplier
; access so the timer ISR cannot clobber MPY/OP2 mid-operation.
mply_11: 
        PUSH	SR	; save status register (restored before return)
         DINT		; turn off interrupts here. 
         NOP			; required after DINT
         MOV.W	R12, &MPY ; load the first operand. 
         MOV.W	R13, &OP2 ; load the second operand & start multiplication. 
         MOV.W	&RESLO, R12 ; low to R12 (this is the return value)
         MOV.W	&RESHI, R13 ; high to R13
; shift the 32-bit result right by 11: RESLO >> 11 contributes 5 bits,
; RESHI << 5 contributes the rest.  RRA is an *arithmetic* shift, so the
; mask below strips any sign-extension bits out of the low half.
         RRA.W R12 ; 1
         RRA.W R12 ; 2
         RRA.W R12 ; 3
         RRA.W R12 ; 4 
         RRA.W R12 ; 5
         RRA.W R12 ; 6
         RRA.W R12 ; 7
         RRA.W R12 ; 8
         RRA.W R12 ; 9
         RRA.W R12 ; 10
         RRA.W R12 ; 11
         
         RLA.W R13 ; 1
         RLA.W R13 ; 2
         RLA.W R13 ; 3
         RLA.W R13 ; 4
         RLA.W R13 ; 5
         
        ;; r14 can be clobbered across a function call, according to the msp430 ABI
        MOV.W	#0x001f, R14
        AND.W	R14, R12 ; mask off all but the bottom 5 bits from RESLO
         ADD.W	R13, R12 ; add (logical OR) the results. R12 is the 11-bit fixed point result.
         POP.W 	SR	; restore interrupt state
         RETA		; return from subroutine.

            .end

Note the MSP430 does not have an opcode for multiple arithmetic shifts, nor does it have code for logical shifts - hence the need for repeated shifts and bitmasks!

Section 3 - Video tracking and host computer control

With the microcontroller done, we then moved to controlling it via a video-tracking computer. At this point, we had created a simple program for testing out parallel port control of the light's three axes using the keyboard (tilt, pan, and shutter). This program was split into two files, a main, and a set of subroutines that could then be called and compiled into the full video tracking program. It uses libparapin to abstract interaction with the parallel port in userspace.

First, the main loop, which is very simple:

#include "parallelout.h"
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

char g_main_loop ; // run flag; set once at startup (no visible reader here).

// Keyboard test driver: read single characters from stdin and translate
// each into a parallel-port command for the MSP430 via interpret_cmd().
int main(int argc, char *argv[])
{
	g_main_loop = 1; 
	char c;
	parallel_setup(); // claim the parallel port and zero the control pins.
 
	while(1){
		c = fgetc(stdin);
		interpret_cmd(c); 
	}
}

Second, the parallel port controller. This uses a thread and circular queue to provide asynchronous, non-blocking control of the communications channel. Non-blocking is critical, as the program waits a small period between low-high transition of the interrupt pin (pin 4) for the MSP430 to read the status of the three lines.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <parapin.h>
#include <pthread.h>

#include "parallelout.h"

char g_q[1024]; //queue for the commands. 

int g_q_rptr; //where to read the next command from. 
int g_q_wptr; //where to put the next command

double g_velPan; 
double g_velTilt; 

//Hardware bring-up helper: blasts a burst of 20000 raw step pulses out
//parallel-port pins 4 and 5.  The pulse ordering is what the stepper
//driver expects, so do not reorder these calls.
//NOTE(review): pin 5 is pulsed twice per iteration while pin 4 is pulsed
//once - confirm whether the second pin-5 pulse was meant for pin 6.
void stepstep(void){
       int i = 0;
       for(i=0; i<20000; i++){
               set_pin(LP_PIN[4]);
               clear_pin(LP_PIN[4]);
	       set_pin(LP_PIN[5]);
               clear_pin(LP_PIN[5]);
	       set_pin(LP_PIN[5]);
               clear_pin(LP_PIN[5]);
       }
}

//Send a velocity-step command to the MSP430: the 2-bit command n is
//encoded on data pins 2 (bit 0) and 3 (bit 1), then pin 4 - the
//firmware's trigger/interrupt line - is raised.  Pin 4 is deliberately
//left HIGH: that is how the firmware tells a velocity command apart
//from a shutter command (which finishes with pin 4 low).
void velstep(int n){
	//printf("velstep %d\n", n); 
	clear_pin(LP_PIN[4]);
	if(n&0x1) set_pin(LP_PIN[2]) ; 
	else clear_pin(LP_PIN[2]); 
		
	if(n&0x2) set_pin(LP_PIN[3]) ; 
	else clear_pin(LP_PIN[3]); 
	set_pin(LP_PIN[4]); 
	//leave it up, so the msp430 knows it is a velocity command. 
}
//Command the shutter fully open: data pins 2,3 = 1,1 then strobe the
//trigger (pin 4) high and back low.  Finishing with pin 4 LOW is what
//marks this as a shutter command (velstep leaves the trigger high).
void openShutter(){
	printf("opening shutter\n"); 
	clear_pin(LP_PIN[4]);
	set_pin(LP_PIN[2]); 
	set_pin(LP_PIN[3]); 
	set_pin(LP_PIN[4]); 
	clear_pin(LP_PIN[4]); //clear the trigger to indicate a shutter command. 
}
//Command the shutter closed: data pins 2,3 = 0,0 then strobe the
//trigger (pin 4).  Ending with pin 4 low marks a shutter command.
void closeShutter(){
	printf("closing shutter\n"); 
	clear_pin(LP_PIN[4]);
	clear_pin(LP_PIN[2]); 
	clear_pin(LP_PIN[3]); 
	set_pin(LP_PIN[4]); 
	clear_pin(LP_PIN[4]); //clear the trigger to indicate a shutter command. 
}
//Command the "small" (partially open) shutter position: data pins
//2,3 = 0,1 then strobe the trigger (pin 4).  Ending with pin 4 low
//marks a shutter command.
void smallShutter(){
	printf("small shutter\n"); 
	clear_pin(LP_PIN[4]);
	clear_pin(LP_PIN[2]); 
	set_pin(LP_PIN[3]); 
	set_pin(LP_PIN[4]); 
	clear_pin(LP_PIN[4]); //clear the trigger to indicate a shutter command. 
}
//Stop both mirror axes: data pins 2,3 = 1,0 then strobe the trigger
//(pin 4).  Ending with pin 4 low marks this as a shutter-class
//(non-velocity) command to the firmware.
void stopMirror(){
	printf("stop mirror\n"); 
	clear_pin(LP_PIN[4]);
	set_pin(LP_PIN[2]); 
	clear_pin(LP_PIN[3]); 
	set_pin(LP_PIN[4]); 
	clear_pin(LP_PIN[4]); //clear the trigger to indicate a shutter command. 
}

//Acquire the parallel port, drive all control lines to their idle
//(low) state, and start the command-queue service thread.
//Fixes: exit(0) on failure reported *success* to the shell - use a
//nonzero status and say why; also check pthread_create's return.
void parallel_setup(){
	if (pin_init_user(LPT1) < 0){
		fprintf(stderr, "parallel: pin_init_user(LPT1) failed\n");
		exit(1); //was exit(0): failure must report a nonzero status.
	}

	pin_output_mode(LP_DATA_PINS | LP_SWITCHABLE_PINS);
	//idle state: data and trigger lines all low.
	clear_pin(LP_PIN[2]);
	clear_pin(LP_PIN[3]); 
	clear_pin(LP_PIN[4]); 
	pthread_t thread1;
	//start the queue-servicing thread. 
	if(pthread_create ( &thread1, NULL, pq_thread, NULL ) != 0){
		fprintf(stderr, "parallel: could not start queue thread\n");
		exit(1);
	}
}

//Decode one keyboard-style command byte and act on it.  Velocity keys
//(w/a/s/d) issue one velocity step and update the host-side velocity
//bookkeeping; o/c/x select a shutter state; space stops the mirror and
//zeroes the tracked velocities.  Unknown bytes are ignored.
void interpret_cmd(char cmd){
	if(cmd == 'w'){ velstep(0); g_velPan  -= 1.0; } //pan right (looking at but of light)
	else if(cmd == 'a'){ velstep(1); g_velTilt += 1.0; } //tilt toward
	else if(cmd == 's'){ velstep(2); g_velPan  += 1.0; } //pan left
	else if(cmd == 'd'){ velstep(3); g_velTilt -= 1.0; } //tilt away
	else if(cmd == 'o'){ openShutter(); }
	else if(cmd == 'c'){ closeShutter(); }
	else if(cmd == 'x'){ smallShutter(); }
	else if(cmd == ' '){ stopMirror(); g_velPan = 0; g_velTilt = 0; }
}

//Sleep for approximately 'us' microseconds via nanosleep.
//Fixes: 'timespec ts' only compiles as C++ - the 'struct' keyword is
//required for plain C; the us*1000 product is widened before the
//multiply so large arguments cannot overflow int.
//NOTE(review): this shadows usleep() from <unistd.h> (which has a
//different signature); consider renaming to avoid the collision.
void usleep(int us){
	struct timespec ts; 
	ts.tv_sec = 0; 
	ts.tv_nsec = (long)us * 1000; 
	nanosleep(&ts, NULL);
}

extern int g_main_loop ; 

void* pq_thread(void* a){
	while(g_main_loop){
		if(g_q_wptr > g_q_rptr){
			char cmd = g_q[g_q_rptr % sizeof(g_q)]; 
			g_q_rptr++; 
			interpret_cmd(cmd); 
		}
		usleep( 200 ); // run at max 500hz update.
		// the msp430 takes about 125us to service the parallel port irq.
	}
	return (void*)0;
}

//Append one command byte to the circular queue read by pq_thread.
//Store-then-increment ordering is the only synchronization used
//(single writer, single reader, by the original design's assumption).
void enqueue(char cmd){
	int slot = g_q_wptr % (int)sizeof(g_q);
	g_q[slot] = cmd;
	g_q_wptr++;
}

Then, we worked on the video tracking program. I will omit some of the noncritical sections involving the firewire (ieee1394), Xv (video display) and X11 (window manager) calls, as the whole program is long, ~1000 lines. Below is 'main' -- see the comments for a detailed description.

//Entry point of the video-tracking program.  Order matters: allocate
//frame buffers, start the parallel-port thread, drive the mirror into
//a known corner (open-loop calibration), bring up the firewire
//cameras, start the display/control thread, then loop capturing frames
//while the g_frame-indexed state machine (CALIB_0/1/2 are frame
//counts defined elsewhere) finishes calibration.
//NOTE(review): 'arc' is presumably a typo for argc; neither is used.
int main(int arc, char *argv[]){
	int i; 
	double t1, t2, t3, t4; //frame timestamps; t1/t3 are currently unused
	t1 = t2 = t3 = t4 = 0.0; 
	signal(SIGINT, cleanup); //trap cntrl c
	signal(SIGPIPE, cleanup);
	//turn off output buffering for ttcp!
	//setvbuf(stdout,(char*)NULL,_IONBF,0);
	//init buffers for old tracking... 
	//NOTE(review): frame size is hard-coded to 640x480 and malloc
	//results are unchecked - a failed allocation crashes later.
	for(int i=0; i<4; i++){
                g_buffer[i] = (short*)malloc(640*480*sizeof(short)); 
        }
        g_averagefb = (int*)malloc(640*480*sizeof(int)); 
        g_velfb = (int*)malloc(640*480*sizeof(int));
        g_lastfb = (unsigned char*)malloc(640*480);	
        g_trackedfb = (unsigned char*)malloc(640*480);	
        for(i=0; i < 640*480; i++){
                g_averagefb[i] = 0; 
        }
	//Step -2: set up threads (the display runs on a seperate thread
	// to keep from blocking the iscochronous recieve channel
	// and hence causing the frame rate to drop. 
	pthread_mutex_init(&g_dispthread_mutex, NULL); 
	pthread_cond_init(&g_dispthread_cond, NULL); 
	//STEP -1: init the parallel port for control of mirror (this also starts that thread)
	parallel_setup(); 
	//Step 0: small shutter so we can track the light easily. 
	smallShutter(); 
	//Step 0.5: move the mirror to the close left (from but of light) for calibration
	//for reference (from the viewpoint of the cord end of the light): 
	// pan left : s
	// pan right: w
	// tilt toward: a
	// tilt away: s
	// small shutter: x
	// open shutter: o
	// closed shutter: c
	// stop mirrors : <space>
	for(i=0; i<10; i++){
		enqueue('s'); //to the left if you are looking at the but of the light.
		enqueue('a'); //the tilt axis has far fewer steps for full range than
		enqueue('s'); // the pan axis hence requires a much higher velocity - 
		enqueue('s'); // so enqueue more 's'. 
		enqueue('s'); 
		enqueue('s'); 
	}
	//Step 1: Open ohci and assign a handle to it.
	//==================================================================================================================
	init_cards();
        //Step 2: Get the camera nodes and describe them as we find them.
	//==================================================================================================================
	init_cams();
        //Step 3: Setup Capture
	//==================================================================================================================
	setup_cams();
	//Step 4: Start sending data
	//==================================================================================================================
	start_iso_transmission();
	//start the other thread. 
	pthread_t thread1;
	pthread_attr_t attr;
	pthread_attr_init(&attr);
	pthread_create( &thread1, &attr, display_thread, 0 ); 
	
	//Main event loop: capture a frame from every camera, hand it to the
	//display/tracking thread, and advance the calibration state machine.
	//g_main_loop is cleared by display_thread when the user presses 'q'.
	while(g_main_loop==true){
		for( i=0; i<numCameras; i++){
			if(dc1394_dma_single_capture(&camera[i]) != DC1394_SUCCESS){
				fprintf(stderr, "dma1394: Failed to capture from cameras\n");
				cleanup(0);
			}
		}
		t2=get_time();
		for( i=0; i<numCameras; i++){
			//display_frames_old(i);
			display_frames(i);
			/*if((g_frame%60) < 15){
				velstep(0); 
			}else if((g_frame%60) < 45){
				velstep(2); 
			}else {
				velstep(0); 
			}	*/			
			if(dc1394_dma_done_with_buffer(&camera[i]) != DC1394_SUCCESS){
				fprintf(stderr, "dma1394: Can't release dma bufer\n");
			}
		}
		
		//once a second (at 60 fps): report frame interval and tracking cost.
		if(g_frame % 60 == 0){
			printf("frame dt: %f (%f) track time %f (%f)\n", t2-t4, 1/(t2-t4), tracktime, 1/(tracktime)); 
		}
		//start with the state machine for the calibration -- 
		if(g_frame == CALIB_0){
			enqueue(' '); //stop it
			printf("!!assuming that the mirror reached it's limit!!\n");
			for( i=0; i<3; i++){
				//now that we have put the mirror into a corner, give it velocity
				// to move to the center of the FOV so that we may turn on 
				// feedback-based tracking. 
				enqueue('w'); //to the left if you are looking at the but of the light.
				enqueue('d'); 
				enqueue('w'); //again, pan motor has many more steps/range than tilt
				enqueue('w');
				enqueue('w');
				enqueue('w');
				enqueue('w');
				enqueue('w');
				enqueue('w');
			}
		}
		if(g_frame == CALIB_1){
			enqueue(' '); //stop it
			printf("!!assuming light is centered now!!\n");
		}
		if(g_frame == CALIB_2){
			enqueue('x');  //small shutter: feedback tracking takes over
		}
		t4 = t2; //remember this frame's timestamp for the next dt
		g_frame++; 
	}
	cleanup(0); 
	return 0;
}

Our tracking algorithm periodically opens and closes the shutter on the light. It is impossible to track a target based on brightness or even pattern detection, since the light is so bright it is impossible to image what it hits and what it does not with our cameras of limited dynamic range. (The human eye, of course, has far better dynamic range.) During the period when the light is off, we wait for the camera shutter speed to stabilize, then average the brightest spot over 10 consecutive frames to obtain a target position. Then, the shutter is opened, and visual feedback is used with a simple PD controller to guide the light to the target. When the device is deployed, we will make the update non-periodic and purely contingent on the detection of motion or of decreased solar cell output. See below for the thread that implements this logic, as well as blits the image onto the screen.

//Display / control thread: woken via g_dispthread_cond once per
//captured frame.  Converts the tracked frame for Xv, blits it, runs
//the tracker, and steps the shutter-cycling state machine that
//alternates between servoing the light and re-acquiring the target
//with the shutter closed.  Returns when g_main_loop is cleared
//(user pressed q in the X window).
void* display_thread(void* ptr ){
	make_window(); 
	while(g_main_loop){
		//NOTE(review): POSIX requires g_dispthread_mutex to be LOCKED
		//before pthread_cond_wait; it is never locked here.  This
		//happens to work on some platforms but should be fixed.
		if(pthread_cond_wait(&g_dispthread_cond, &g_dispthread_mutex) == 0){
			pthread_mutex_unlock(&g_dispthread_mutex); 
			double t1 = get_time(); 
			g_first_frame=false;
			//convert into the XV format (this seems very inefficient to me...)
			for(unsigned int i=0; i< g_framewidth*g_frameheight; i++){
				g_fb[i] = g_trackedfb[i]+ 0x8000;
			}
			double c_r, c_c; //centroid (row, col) of the tracked spot
			new_track(0, &tcam[0], g_trackedfb, 
				g_framewidth, g_frameheight, CONTRAST_MIN, SEARCHR, 
				GAUSSDROPOFF, NUMBER_OF_MARKERS, &c_r, &c_c);
			xv_image=XvCreateImage(display, info[adaptor].base_id, 
				format_disp, (char*)g_fb, g_framewidth, g_frameheight*numCameras);
			XvPutImage(display, info[adaptor].base_id, window, gc, xv_image, 0, 
				0, g_framewidth, g_frameheight*numCameras, 0, 0,
				g_windowwidth, g_windowheight);
			free(xv_image); //NOTE(review): XFree() is the conventional release call here.
			
			//do some dumb control (finally!)
			// initially, guide the light to the center of the screen. 
			if(g_frame > CALIB_1 && g_frame <= CALIB_2){
				g_target_c = 320.0; 
				g_target_r = 240.0; 
				servo_mirror(c_c, c_r); //get it stuck on the center! 
			}
			int time = g_frame - CALIB_2; //frames since calibration ended
			// below is the *main loop* for cycling the shutter open/close
			// 300-frame cycle: frames 0-239 servo the light; 240 closes
			// the shutter and stops; 260-279 accumulate the bright spot
			// into the target; 280 re-opens (small) and divides the
			// 20-frame sum into a mean target position.
			if(g_frame > CALIB_2){
				if(time % 300 < 240){
					servo_mirror(c_c, c_r); 
				}
				if(time % 300 == 240){
					enqueue('c'); 
					enqueue(' '); 
				}
				if(time % 300  >= 260 && time % 300  < 280 ){
					g_target_c += c_c; 
					g_target_r += c_r; 
				}
				if(time % 300 == 280){
					enqueue('x'); 
					g_target_c /= 20.0; 
					g_target_r /= 20.0; 
				}
			}
			double t2 = get_time(); 
			tracktime = t2 - t1 ; 
		}
		//normalize_com(NUMBER_OF_MARKERS);
		XFlush(display);
		//drain pending X events: track window resizes; quit on q/Q.
		while(XPending(display)>0){
			XNextEvent(display,&xev);
			switch(xev.type){
				case ConfigureNotify:
					g_windowwidth=xev.xconfigure.width;
					g_windowheight=xev.xconfigure.height;
				break;
				case KeyPress:
					//NOTE(review): XKeycodeToKeysym is deprecated in
					//newer Xlib; XkbKeycodeToKeysym is the replacement.
					switch(XKeycodeToKeysym(display,xev.xkey.keycode,0)){
						case XK_q:
						case XK_Q:
							g_main_loop = false; 
							//cleanup(0);
						break;
						}
				break;
			}
		} //XPending
	}
	if ((void *)window != NULL){
		XUnmapWindow(display,window);
	}
	fprintf(stderr,"dma1394: Unmapped Window.\n");
	if (display != NULL){
		XFlush(display);
	}
	return (void*) 0;
}

The PD controller uses very pessimistic values for the coefficients, as we discovered that the timing resolution on our older linux computer is low - about 5ms. This means that if too many velocity step commands are sent to the parallel port thread at one time, it will get backlogged, which will induce a phase-shift between control and actuation of velocity. Hence, the light must move rather slowly, on the order of one velocity step on each axis per frame. Again, below.

void servo_mirror(double c_c, double c_r ){
	double dc = c_c - g_target_c; //for now assume that we want to stabilize in
	double dr = c_r - g_target_r; // the center.
	double vgain = 8.0 ; 
	double pgain = 1.0/80.0; 
	int lim = 1; 
	double c = dc + g_velPan*vgain ; 
	int ccmd = 0; 
	int rcmd = 0; 
	if(c > 0){
		for(int i=0; i<c*pgain && i < lim; i++){
			enqueue('w');
			ccmd --; 
		}
	}
	if(c < 0){
		for(int i=0; i<c*-1.0*pgain && i < lim; i++){
			enqueue('s');
			ccmd ++; 
		}
	}
		
	vgain *= 1.5; //tilt mirror moves quicker!
	double r = dr + g_velTilt*vgain;
	if(r>0){
		for(int i=0; i<r*pgain && i < lim; i++){
			enqueue('d');
			rcmd--; 
		}
	}
	if(r<0){
		for(int i=0; i<r*-1.0*pgain && i < lim; i++){
			enqueue('a');
			rcmd++; 
		}
	}
	//this for debugging loop stability problems in matlab. 
	//printf("%f %f %d %f %f %d\n", dc, g_velPan*vgain, ccmd, dr, g_velTilt*vgain, rcmd); 
	//if(dr + g_velTilt*vgain > 0) enqueue('d'); OLD
	//if(dr + g_velTilt*vgain < 0) enqueue('a'); 
}

Our video tracking algorithm first uses a tree-like algorithm to quickly and robustly search for the brightest region in the scene; we presume, somewhat simplistically, that this will be the target. When the device is put into use with an actual monkey cage, we'll surround the camera with high-intensity infrared LEDs to effectively illuminate a retroreflector placed on the monkey's head. Below is the code which performs this computation.

//Downsample the frame into a downsamp_r x downsamp_c grid of cells.
//For each cell, accumulate the pixel sum and record the min/max pixel
//intensity; blob_search later uses (max - min) as a contrast gate.
void blur(int camno, track_cam* tcam, unsigned char* fb, int framewidth, int downsamp_r, int downsamp_c, int downsamp_w, int downsamp_h){
	//reset every cell's accumulators.
	for(int cell=0; cell<downsamp_r * downsamp_c; cell++){
		tcam[camno].frame.sum[cell]=0;
		tcam[camno].frame.contr_min[cell]=255;
		tcam[camno].frame.contr_max[cell]=0;
	}
	//visit each pixel exactly once, binning it into its grid cell.
	for(int k=0; k<downsamp_r; k++){
		for(int row=k*downsamp_h; row<(k+1)*downsamp_h; row++){
			for(int j=0; j<downsamp_c; j++){
				int cell = j + k*downsamp_c;
				for(int col=j*downsamp_w; col<(j+1)*downsamp_w; col++){
					int v = int(fb[row*framewidth+col]);
					tcam[camno].frame.sum[cell] += v;
					if(v > tcam[camno].frame.contr_max[cell]){
						tcam[camno].frame.contr_max[cell] = v;
					}
					if(v < tcam[camno].frame.contr_min[cell]){
						tcam[camno].frame.contr_min[cell] = v;
					}
				}
			}
		}
	}
}

//Pick the num_markers brightest grid cells that also pass the contrast
//gate (max - min > contrast_min).  After each winner is recorded, its
//sum is zeroed so the next pass finds the runner-up.
void blob_search(int camno, track_cam* tcam, int num_markers, int contrast_min, int downsamp_r, int downsamp_c){
	int ncells = downsamp_r*downsamp_c;
	for(int i=0; i<num_markers; i++){
		int best = 0;
		for(int m=0; m<ncells; m++){
			int contr = tcam[camno].frame.contr_max[m] - tcam[camno].frame.contr_min[m];
			if(tcam[camno].frame.sum[m] > best && contr > contrast_min){
				best = tcam[camno].frame.sum[m];
				tcam[camno].marker[i].downsamp_loc = m;
			}
		}
		//NOTE(review): if no cell passes the gate, downsamp_loc keeps
		//whatever value it held before - confirm that is intended.
		tcam[camno].frame.sum[tcam[camno].marker[i].downsamp_loc] = 0; //kill the winner so the next marker finds the next biggest.
	}
}

//Within each marker's winning grid cell, scan every pixel and record
//the row, column, and value of the single brightest one.
void brightest_pix_search(unsigned char * fb, int camno, track_cam* tcam, int num_markers, int framewidth, int downsamp_r, int downsamp_c, int downsamp_w, int downsamp_h){
	for(int i=0; i<num_markers; i++){
		int loc  = tcam[camno].marker[i].downsamp_loc;
		int row0 = (loc / downsamp_c) * downsamp_h; //cell's top row
		int col0 = (loc % downsamp_c) * downsamp_w; //cell's left column
		tcam[camno].marker[i].br_pix_val = 0; //start below any real pixel
		for(int row=row0; row<row0+downsamp_h; row++){
			for(int col=col0; col<col0+downsamp_w; col++){
				int v = int(fb[row*framewidth+col]);
				if(v > tcam[camno].marker[i].br_pix_val){
					tcam[camno].marker[i].br_pix_row = row;
					tcam[camno].marker[i].br_pix_col = col;
					tcam[camno].marker[i].br_pix_val = v;
				}
			}
		}
	}
}

The blocking (or blobbing) and search algorithm yields the estimated location of the brightest pixel in the image. This is passed to a specialized array-growth region growing algorithm which dynamically expands a region around the suggested brightest pixel to include all pixels that are within a threshold of brightness to the brightest. The region growing algorithm then computes the center of mass from the list of pixel coordinates, which are then passed to the PD and target location routines.

//Region growing (breadth-first flood fill) from the seed (b_r, b_c) -
//the brightest pixel found upstream.  Any 4-connected neighbor whose
//brightness is within 'thresh' of the seed's is absorbed: zeroed in
//src (so it is not revisited), painted 'fill' in dest for display, and
//pushed onto the static g_rows/g_cols worklist.  When the region stops
//growing or the worklist fills, the centroid of the absorbed pixels is
//returned through *c_r/*c_c and marked 0xffff in dest.
void region_grow(unsigned char * src, unsigned short* dest, 
		int w, int h, int b_r, int b_c, double* c_r, double* c_c){
	//need to do an expansion from the brightest point. 
	//this is sorta a random-access op - which is bad. 
	unsigned short fill = 0xff00; 
	int n = 0; //worklist length = pixels accepted so far
	short r, c; 
	int  i, p; 
	unsigned char thresh = 20 ; 
	unsigned char brightest = src[w*b_r + b_c]; 
	g_rows[n] = b_r; //seed the worklist
	g_cols[n] = b_c; 
	n++; 
	int sta = 0; //[sta, end) is the frontier added on the previous pass
	int end = 0; 
	int lim = sizeof(g_rows)/sizeof(int); //capacity of the static worklist
	while(n < lim && n > sta){
		//loop through all the new points, adding to the set as we go. 
		end = n; 
		for(i=sta; i < end; i++){
			//the four neighbors are visited by mutating r/c IN PLACE:
			//down, up, (restore to center,) right, left.  The order of
			//the r++/r-=2/r++/c++/c-=2 statements is load-bearing.
			r = g_rows[i]; 
			c =  g_cols[i]; 
			r++; //down
			if(r >= 0 && r < h && c >= 0 && c < w && n < lim){
				p = r*w +c; 
				//unsigned chars promote to int here, so pixels BRIGHTER
				//than the seed (negative difference) are also accepted.
				//NOTE(review): once src[p] is zeroed the test becomes
				//'brightest < thresh'; confirm the seed is always
				//brighter than thresh or consumed pixels could re-enter.
				if(brightest - src[p] < thresh){
					src[p] = 0; //consume: cannot be added twice
					dest[p] = fill; 
					g_rows[n] = r; 
					g_cols[n] = c; 
					n++; 
				}
			}
			r -= 2; //up.
			if(r >= 0 && r < h && c >= 0 && c < w && n < lim){
				p = r*w +c; 
				if(brightest - src[p] < thresh){
					src[p] = 0; 
					dest[p] = fill; 
					g_rows[n] = r; 
					g_cols[n] = c; 
					n++; 
				}
			}
			r++; //center
			c++; //to the right. 
			if(r >= 0 && r < h && c >= 0 && c < w && n < lim){
				p = r*w +c; 
				if(brightest - src[p] < thresh){
					src[p] = 0; 
					dest[p] = fill; 
					g_rows[n] = r; 
					g_cols[n] = c; 
					n++; 
				}
			}
			c-=2; //to the left. 
			if(r >= 0 && r < h && c >= 0 && c < w && n < lim){
				p = r*w +c; 
				if(brightest - src[p] < thresh){
					src[p] = 0; 
					dest[p] = fill; 
					g_rows[n] = r; 
					g_cols[n] = c; 
					n++; 
				}
			}
		}//end loop over past points. 
		sta = end; 
	}
	//calculate the center of mass of every accepted pixel. 
	double cm_r = 0; 
	double cm_c = 0; 
	for(i=0; i<n; i++){
		cm_r += g_rows[i]; 
		cm_c += g_cols[i]; 
	}
	cm_r /= n; 
	cm_c /= n; 
	*c_r = cm_r; 
	*c_c = cm_c; 
	//printf("point: %f %f %d \n",  cm_r, cm_c, g_frame++); 
	//mark the centroid pixel in the display buffer.
	int cm_r_i, cm_c_i; 
	cm_r_i = (int)cm_r; 
	cm_c_i = (int)cm_c; 
	if(cm_c_i >= 0 && cm_c_i < w && cm_r_i >= 0 && cm_r_i < h)
		dest[cm_r_i*w + cm_c_i] = 0xffff; 
}

And that is, roughly, the entirety of the video tracking program! (Most of the rest of the code deals with the firewire bus and other less interesting details.) We conclude with a picture of the whole setup in the office.

{648}
hide / / print
ref: notes-0 tags: ME270 light video tracking date: 12-04-2008 00:07 gmt revision:0 [head]

Section 3 - Video tracking and host computer control

With the microcontroller done, we then moved to controlling it via a video-tracking computer. At this point, we had created a simple program for testing out parallel port control of the light's three axes using the keyboard (tilt, pan, and shutter). This program was split into two files, a main, and a set of subroutines that could then be called and compiled into the full video tracking program. It uses libparapin to abstract interaction with the parallel port in userspace.

First, the main loop, which is very simple:

#include "parallelout.h"
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

char g_main_loop ; 

//Keyboard front-end: reads single-character commands from stdin and
//forwards each one to interpret_cmd until end-of-file.
//Fix: fgetc() returns int; storing the result in a char loses the EOF
//marker, so the original loop never terminated and spun forever feeding
//the EOF sentinel to interpret_cmd.  Exit cleanly on EOF instead.
int main(int argc, char *argv[])
{
	g_main_loop = 1; 
	int c; //must be int, not char, to distinguish EOF from data
	parallel_setup(); 
 
	while((c = fgetc(stdin)) != EOF){
		interpret_cmd((char)c); 
	}
	g_main_loop = 0; //let the queue-servicing thread wind down
	return 0;
}

Second, the parallel port controller. This uses a thread and circular queue to provide asynchronous, non-blocking control of the communications channel. Non-blocking is critical, as the program waits a small period between low-high transition of the interrupt pin (pin 4) for the MSP430 to read the status of the three lines.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <parapin.h>
#include <pthread.h>

#include "parallelout.h"

char g_q[1024]; //queue for the commands. 

int g_q_rptr; //where to read the next command from. 
int g_q_wptr; //where to put the next command

double g_velPan; 
double g_velTilt; 

//Hardware bring-up helper: blasts a burst of 20000 raw step pulses out
//parallel-port pins 4 and 5.  The pulse ordering is what the stepper
//driver expects, so do not reorder these calls.
//NOTE(review): pin 5 is pulsed twice per iteration while pin 4 is pulsed
//once - confirm whether the second pin-5 pulse was meant for pin 6.
void stepstep(void){
       int i = 0;
       for(i=0; i<20000; i++){
               set_pin(LP_PIN[4]);
               clear_pin(LP_PIN[4]);
	       set_pin(LP_PIN[5]);
               clear_pin(LP_PIN[5]);
	       set_pin(LP_PIN[5]);
               clear_pin(LP_PIN[5]);
       }
}

//Send a velocity-step command to the MSP430: the 2-bit command n is
//encoded on data pins 2 (bit 0) and 3 (bit 1), then pin 4 - the
//firmware's trigger/interrupt line - is raised.  Pin 4 is deliberately
//left HIGH: that is how the firmware tells a velocity command apart
//from a shutter command (which finishes with pin 4 low).
void velstep(int n){
	//printf("velstep %d\n", n); 
	clear_pin(LP_PIN[4]);
	if(n&0x1) set_pin(LP_PIN[2]) ; 
	else clear_pin(LP_PIN[2]); 
		
	if(n&0x2) set_pin(LP_PIN[3]) ; 
	else clear_pin(LP_PIN[3]); 
	set_pin(LP_PIN[4]); 
	//leave it up, so the msp430 knows it is a velocity command. 
}
//Command the shutter fully open: data pins 2,3 = 1,1 then strobe the
//trigger (pin 4) high and back low.  Finishing with pin 4 LOW is what
//marks this as a shutter command (velstep leaves the trigger high).
void openShutter(){
	printf("opening shutter\n"); 
	clear_pin(LP_PIN[4]);
	set_pin(LP_PIN[2]); 
	set_pin(LP_PIN[3]); 
	set_pin(LP_PIN[4]); 
	clear_pin(LP_PIN[4]); //clear the trigger to indicate a shutter command. 
}
//Command the shutter closed: data pins 2,3 = 0,0 then strobe the
//trigger (pin 4).  Ending with pin 4 low marks a shutter command.
void closeShutter(){
	printf("closing shutter\n"); 
	clear_pin(LP_PIN[4]);
	clear_pin(LP_PIN[2]); 
	clear_pin(LP_PIN[3]); 
	set_pin(LP_PIN[4]); 
	clear_pin(LP_PIN[4]); //clear the trigger to indicate a shutter command. 
}
//Command the "small" (partially open) shutter position: data pins
//2,3 = 0,1 then strobe the trigger (pin 4).  Ending with pin 4 low
//marks a shutter command.
void smallShutter(){
	printf("small shutter\n"); 
	clear_pin(LP_PIN[4]);
	clear_pin(LP_PIN[2]); 
	set_pin(LP_PIN[3]); 
	set_pin(LP_PIN[4]); 
	clear_pin(LP_PIN[4]); //clear the trigger to indicate a shutter command. 
}
//Stop both mirror axes: data pins 2,3 = 1,0 then strobe the trigger
//(pin 4).  Ending with pin 4 low marks this as a shutter-class
//(non-velocity) command to the firmware.
void stopMirror(){
	printf("stop mirror\n"); 
	clear_pin(LP_PIN[4]);
	set_pin(LP_PIN[2]); 
	clear_pin(LP_PIN[3]); 
	set_pin(LP_PIN[4]); 
	clear_pin(LP_PIN[4]); //clear the trigger to indicate a shutter command. 
}

//Acquire the parallel port, drive all control lines to their idle
//(low) state, and start the command-queue service thread.
//Fixes: exit(0) on failure reported *success* to the shell - use a
//nonzero status and say why; also check pthread_create's return.
void parallel_setup(){
	if (pin_init_user(LPT1) < 0){
		fprintf(stderr, "parallel: pin_init_user(LPT1) failed\n");
		exit(1); //was exit(0): failure must report a nonzero status.
	}

	pin_output_mode(LP_DATA_PINS | LP_SWITCHABLE_PINS);
	//idle state: data and trigger lines all low.
	clear_pin(LP_PIN[2]);
	clear_pin(LP_PIN[3]); 
	clear_pin(LP_PIN[4]); 
	pthread_t thread1;
	//start the queue-servicing thread. 
	if(pthread_create ( &thread1, NULL, pq_thread, NULL ) != 0){
		fprintf(stderr, "parallel: could not start queue thread\n");
		exit(1);
	}
}

//Decode one keyboard-style command byte and act on it.  Velocity keys
//(w/a/s/d) issue one velocity step and update the host-side velocity
//bookkeeping; o/c/x select a shutter state; space stops the mirror and
//zeroes the tracked velocities.  Unknown bytes are ignored.
void interpret_cmd(char cmd){
	if(cmd == 'w'){ velstep(0); g_velPan  -= 1.0; } //pan right (looking at but of light)
	else if(cmd == 'a'){ velstep(1); g_velTilt += 1.0; } //tilt toward
	else if(cmd == 's'){ velstep(2); g_velPan  += 1.0; } //pan left
	else if(cmd == 'd'){ velstep(3); g_velTilt -= 1.0; } //tilt away
	else if(cmd == 'o'){ openShutter(); }
	else if(cmd == 'c'){ closeShutter(); }
	else if(cmd == 'x'){ smallShutter(); }
	else if(cmd == ' '){ stopMirror(); g_velPan = 0; g_velTilt = 0; }
}

//Sleep for approximately 'us' microseconds via nanosleep.
//Fixes: 'timespec ts' only compiles as C++ - the 'struct' keyword is
//required for plain C; the us*1000 product is widened before the
//multiply so large arguments cannot overflow int.
//NOTE(review): this shadows usleep() from <unistd.h> (which has a
//different signature); consider renaming to avoid the collision.
void usleep(int us){
	struct timespec ts; 
	ts.tv_sec = 0; 
	ts.tv_nsec = (long)us * 1000; 
	nanosleep(&ts, NULL);
}

extern int g_main_loop ; 

void* pq_thread(void* a){
	while(g_main_loop){
		if(g_q_wptr > g_q_rptr){
			char cmd = g_q[g_q_rptr % sizeof(g_q)]; 
			g_q_rptr++; 
			interpret_cmd(cmd); 
		}
		usleep( 200 ); // run at max 500hz update.
		// the msp430 takes about 125us to service the parallel port irq.
	}
	return (void*)0;
}

//Append one command byte to the circular queue read by pq_thread.
//Store-then-increment ordering is the only synchronization used
//(single writer, single reader, by the original design's assumption).
void enqueue(char cmd){
	int slot = g_q_wptr % (int)sizeof(g_q);
	g_q[slot] = cmd;
	g_q_wptr++;
}

Then, we worked on the video tracking program. I will omit some of the noncritical sections involving the firewire (ieee1394), Xv (video display) and X11 (window manager) calls, as the whole program is long, ~1000 lines. Below is 'main' -- see the comments for a detailed description.

//Entry point of the video-tracking program.  Order matters: allocate
//frame buffers, start the parallel-port thread, drive the mirror into
//a known corner (open-loop calibration), bring up the firewire
//cameras, start the display/control thread, then loop capturing frames
//while the g_frame-indexed state machine (CALIB_0/1/2 are frame
//counts defined elsewhere) finishes calibration.
//NOTE(review): 'arc' is presumably a typo for argc; neither is used.
int main(int arc, char *argv[]){
	int i; 
	double t1, t2, t3, t4; //frame timestamps; t1/t3 are currently unused
	t1 = t2 = t3 = t4 = 0.0; 
	signal(SIGINT, cleanup); //trap cntrl c
	signal(SIGPIPE, cleanup);
	//turn off output buffering for ttcp!
	//setvbuf(stdout,(char*)NULL,_IONBF,0);
	//init buffers for old tracking... 
	//NOTE(review): frame size is hard-coded to 640x480 and malloc
	//results are unchecked - a failed allocation crashes later.
	for(int i=0; i<4; i++){
                g_buffer[i] = (short*)malloc(640*480*sizeof(short)); 
        }
        g_averagefb = (int*)malloc(640*480*sizeof(int)); 
        g_velfb = (int*)malloc(640*480*sizeof(int));
        g_lastfb = (unsigned char*)malloc(640*480);	
        g_trackedfb = (unsigned char*)malloc(640*480);	
        for(i=0; i < 640*480; i++){
                g_averagefb[i] = 0; 
        }
	//Step -2: set up threads (the display runs on a seperate thread
	// to keep from blocking the iscochronous recieve channel
	// and hence causing the frame rate to drop. 
	pthread_mutex_init(&g_dispthread_mutex, NULL); 
	pthread_cond_init(&g_dispthread_cond, NULL); 
	//STEP -1: init the parallel port for control of mirror (this also starts that thread)
	parallel_setup(); 
	//Step 0: small shutter so we can track the light easily. 
	smallShutter(); 
	//Step 0.5: move the mirror to the close left (from but of light) for calibration
	//for reference (from the viewpoint of the cord end of the light): 
	// pan left : s
	// pan right: w
	// tilt toward: a
	// tilt away: s
	// small shutter: x
	// open shutter: o
	// closed shutter: c
	// stop mirrors : <space>
	for(i=0; i<10; i++){
		enqueue('s'); //to the left if you are looking at the but of the light.
		enqueue('a'); //the tilt axis has far fewer steps for full range than
		enqueue('s'); // the pan axis hence requires a much higher velocity - 
		enqueue('s'); // so enqueue more 's'. 
		enqueue('s'); 
		enqueue('s'); 
	}
	//Step 1: Open ohci and assign a handle to it.
	//==================================================================================================================
	init_cards();
        //Step 2: Get the camera nodes and describe them as we find them.
	//==================================================================================================================
	init_cams();
        //Step 3: Setup Capture
	//==================================================================================================================
	setup_cams();
	//Step 4: Start sending data
	//==================================================================================================================
	start_iso_transmission();
	//start the other thread. 
	pthread_t thread1;
	pthread_attr_t attr;
	pthread_attr_init(&attr);
	pthread_create( &thread1, &attr, display_thread, 0 ); 
	
	//Main event loop: capture a frame from every camera, hand it to the
	//display/tracking thread, and advance the calibration state machine.
	//g_main_loop is cleared by display_thread when the user presses 'q'.
	while(g_main_loop==true){
		for( i=0; i<numCameras; i++){
			if(dc1394_dma_single_capture(&camera[i]) != DC1394_SUCCESS){
				fprintf(stderr, "dma1394: Failed to capture from cameras\n");
				cleanup(0);
			}
		}
		t2=get_time();
		for( i=0; i<numCameras; i++){
			//display_frames_old(i);
			display_frames(i);
			/*if((g_frame%60) < 15){
				velstep(0); 
			}else if((g_frame%60) < 45){
				velstep(2); 
			}else {
				velstep(0); 
			}	*/			
			if(dc1394_dma_done_with_buffer(&camera[i]) != DC1394_SUCCESS){
				fprintf(stderr, "dma1394: Can't release dma bufer\n");
			}
		}
		
		//once a second (at 60 fps): report frame interval and tracking cost.
		if(g_frame % 60 == 0){
			printf("frame dt: %f (%f) track time %f (%f)\n", t2-t4, 1/(t2-t4), tracktime, 1/(tracktime)); 
		}
		//start with the state machine for the calibration -- 
		if(g_frame == CALIB_0){
			enqueue(' '); //stop it
			printf("!!assuming that the mirror reached it's limit!!\n");
			for( i=0; i<3; i++){
				//now that we have put the mirror into a corner, give it velocity
				// to move to the center of the FOV so that we may turn on 
				// feedback-based tracking. 
				enqueue('w'); //to the left if you are looking at the but of the light.
				enqueue('d'); 
				enqueue('w'); //again, pan motor has many more steps/range than tilt
				enqueue('w');
				enqueue('w');
				enqueue('w');
				enqueue('w');
				enqueue('w');
				enqueue('w');
			}
		}
		if(g_frame == CALIB_1){
			enqueue(' '); //stop it
			printf("!!assuming light is centered now!!\n");
		}
		if(g_frame == CALIB_2){
			enqueue('x');  //small shutter: feedback tracking takes over
		}
		t4 = t2; //remember this frame's timestamp for the next dt
		g_frame++; 
	}
	cleanup(0); 
	return 0;
}

Our tracking algorithm periodically opens and closes the shutter on the light. It is impossible to track a target based on brightness or even pattern detection, since the light is so bright it is impossible to image what it hits and what it does not with our cameras of limited dynamic range. (The human eye, of course, has far better dynamic range.) During the period when the light is off, we wait for the camera shutter speed to stabilize, then average the brightest spot over 10 consecutive frames to obtain a target position. Then, the shutter is opened, and visual feedback is used with a simple PD controller to guide the light to the target. When the device is deployed, we will make the update non-periodic and purely contingent on the detection of motion or of decreased solar cell output. See below for the thread that implements this logic, as well as blits the image onto the screen.

//Display / control thread: woken via g_dispthread_cond once per
//captured frame.  Converts the tracked frame for Xv, blits it, runs
//the tracker, and steps the shutter-cycling state machine that
//alternates between servoing the light and re-acquiring the target
//with the shutter closed.  Returns when g_main_loop is cleared
//(user pressed q in the X window).
void* display_thread(void* ptr ){
	make_window(); 
	while(g_main_loop){
		//NOTE(review): POSIX requires g_dispthread_mutex to be LOCKED
		//before pthread_cond_wait; it is never locked here.  This
		//happens to work on some platforms but should be fixed.
		if(pthread_cond_wait(&g_dispthread_cond, &g_dispthread_mutex) == 0){
			pthread_mutex_unlock(&g_dispthread_mutex); 
			double t1 = get_time(); 
			g_first_frame=false;
			//convert into the XV format (this seems very inefficient to me...)
			for(unsigned int i=0; i< g_framewidth*g_frameheight; i++){
				g_fb[i] = g_trackedfb[i]+ 0x8000;
			}
			double c_r, c_c; //centroid (row, col) of the tracked spot
			new_track(0, &tcam[0], g_trackedfb, 
				g_framewidth, g_frameheight, CONTRAST_MIN, SEARCHR, 
				GAUSSDROPOFF, NUMBER_OF_MARKERS, &c_r, &c_c);
			xv_image=XvCreateImage(display, info[adaptor].base_id, 
				format_disp, (char*)g_fb, g_framewidth, g_frameheight*numCameras);
			XvPutImage(display, info[adaptor].base_id, window, gc, xv_image, 0, 
				0, g_framewidth, g_frameheight*numCameras, 0, 0,
				g_windowwidth, g_windowheight);
			free(xv_image); //NOTE(review): XFree() is the conventional release call here.
			
			//do some dumb control (finally!)
			// initially, guide the light to the center of the screen. 
			if(g_frame > CALIB_1 && g_frame <= CALIB_2){
				g_target_c = 320.0; 
				g_target_r = 240.0; 
				servo_mirror(c_c, c_r); //get it stuck on the center! 
			}
			int time = g_frame - CALIB_2; //frames since calibration ended
			// below is the *main loop* for cycling the shutter open/close
			// 300-frame cycle: frames 0-239 servo the light; 240 closes
			// the shutter and stops; 260-279 accumulate the bright spot
			// into the target; 280 re-opens (small) and divides the
			// 20-frame sum into a mean target position.
			if(g_frame > CALIB_2){
				if(time % 300 < 240){
					servo_mirror(c_c, c_r); 
				}
				if(time % 300 == 240){
					enqueue('c'); 
					enqueue(' '); 
				}
				if(time % 300  >= 260 && time % 300  < 280 ){
					g_target_c += c_c; 
					g_target_r += c_r; 
				}
				if(time % 300 == 280){
					enqueue('x'); 
					g_target_c /= 20.0; 
					g_target_r /= 20.0; 
				}
			}
			double t2 = get_time(); 
			tracktime = t2 - t1 ; 
		}
		//normalize_com(NUMBER_OF_MARKERS);
		XFlush(display);
		//drain pending X events: track window resizes; quit on q/Q.
		while(XPending(display)>0){
			XNextEvent(display,&xev);
			switch(xev.type){
				case ConfigureNotify:
					g_windowwidth=xev.xconfigure.width;
					g_windowheight=xev.xconfigure.height;
				break;
				case KeyPress:
					//NOTE(review): XKeycodeToKeysym is deprecated in
					//newer Xlib; XkbKeycodeToKeysym is the replacement.
					switch(XKeycodeToKeysym(display,xev.xkey.keycode,0)){
						case XK_q:
						case XK_Q:
							g_main_loop = false; 
							//cleanup(0);
						break;
						}
				break;
			}
		} //XPending
	}
	if ((void *)window != NULL){
		XUnmapWindow(display,window);
	}
	fprintf(stderr,"dma1394: Unmapped Window.\n");
	if (display != NULL){
		XFlush(display);
	}
	return (void*) 0;
}

The PD controller uses very pessimistic values for the coefficients, as we discovered that the timing resolution on our older linux computer is low - about 5ms. This means that if too many velocity step commands are sent to the parallel port thread at one time, it will get backlogged, which will induce a phase-shift between control and actuation of velocity. Hence, the light must move rather slowly, on the order of one velocity step on each axis per frame. Again, below.

void servo_mirror(double c_c, double c_r ){
	double dc = c_c - g_target_c; //for now assume that we want to stabilize in
	double dr = c_r - g_target_r; // the center.
	double vgain = 8.0 ; 
	double pgain = 1.0/80.0; 
	int lim = 1; 
	double c = dc + g_velPan*vgain ; 
	int ccmd = 0; 
	int rcmd = 0; 
	if(c > 0){
		for(int i=0; i<c*pgain && i < lim; i++){
			enqueue('w');
			ccmd --; 
		}
	}
	if(c < 0){
		for(int i=0; i<c*-1.0*pgain && i < lim; i++){
			enqueue('s');
			ccmd ++; 
		}
	}
		
	vgain *= 1.5; //tilt mirror moves quicker!
	double r = dr + g_velTilt*vgain;
	if(r>0){
		for(int i=0; i<r*pgain && i < lim; i++){
			enqueue('d');
			rcmd--; 
		}
	}
	if(r<0){
		for(int i=0; i<r*-1.0*pgain && i < lim; i++){
			enqueue('a');
			rcmd++; 
		}
	}
	//this for debugging loop stability problems in matlab. 
	//printf("%f %f %d %f %f %d\n", dc, g_velPan*vgain, ccmd, dr, g_velTilt*vgain, rcmd); 
	//if(dr + g_velTilt*vgain > 0) enqueue('d'); OLD
	//if(dr + g_velTilt*vgain < 0) enqueue('a'); 
}

Our video tracking algorithm first uses a tree-like algorithm to quickly and robustly search for the brightest region in the scene; we presume, somewhat simplistically, that this will be the target. When the device is put into use with an actual monkey cage, we'll surround the camera with high-intensity infrared LEDs to effectively illuminate a retroreflector placed on the monkey's head. Below is the code which performs this computation.

//make a blur matrix
//void blur(frame_info* frame, unsigned char * fb, int framewidth, int downsamp, int downsamp_w, int downsamp_h){
void blur(int camno, track_cam* tcam, unsigned char* fb, int framewidth, int downsamp_r, int downsamp_c, int downsamp_w, int downsamp_h){
	//Downsample the frame into a downsamp_r x downsamp_c grid of cells.
	//For each cell, accumulate the pixel sum and track the min/max intensity;
	//(max - min) is used later as a contrast measure to reject flat regions.
	//initialize the per-cell accumulators.
	for(int m=0; m<downsamp_r * downsamp_c; m++){
		tcam[camno].frame.sum[m]=0;
		tcam[camno].frame.contr_min[m]=255;
		tcam[camno].frame.contr_max[m]=0;
	}
	for(int k=0; k<downsamp_r; k++){
		for(int row=k*downsamp_h; row<k*downsamp_h+downsamp_h; row++){
			int base = row*framewidth; //hoisted: row offset is invariant below.
			for(int j=0; j<downsamp_c; j++){
				int cell = j+(k*downsamp_c); //hoisted: destination cell index.
				for(int col=j*downsamp_w; col<j*downsamp_w+downsamp_w; col++){
					int v = int(fb[base+col]); //fetch the pixel once (was read 5x).
					tcam[camno].frame.sum[cell] += v;
					if(v > tcam[camno].frame.contr_max[cell]){
						tcam[camno].frame.contr_max[cell] = v; //contrast check
					}
					if(v < tcam[camno].frame.contr_min[cell]){
						tcam[camno].frame.contr_min[cell] = v; //contrast check
					}
				}
			}
		}
	}
}

//blob_search function
//search through the sum matrix and find the brightest sums
//void blob_search(frame_info* frame, marker* marker, int num_markers, int contrast_min){
void blob_search(int camno, track_cam* tcam, int num_markers, int contrast_min, int downsamp_r, int downsamp_c){
	//For each marker, find the brightest downsampled cell whose contrast
	//(max - min) exceeds contrast_min, then zero that cell's sum so the
	//next marker locks onto the next-brightest blob.
	for(int i=0; i<num_markers; i++){
		int blob_val=0;
		int blob_loc=-1; //-1 = no qualifying cell found for this marker
		for(int m=0; m<downsamp_r*downsamp_c; m++){
			if(tcam[camno].frame.sum[m]>blob_val && tcam[camno].frame.contr_max[m]-tcam[camno].frame.contr_min[m]>contrast_min){ //has to have a big contrast to be a blob (CONTRAST is user defined macro)
				blob_val=tcam[camno].frame.sum[m]; //the new max
				blob_loc=m;
			}
		}
		if(blob_loc >= 0){
			tcam[camno].marker[i].downsamp_loc=blob_loc;
			//kill the one we just found so we can find the next biggest one.
			tcam[camno].frame.sum[blob_loc]=0;
		}
		//bug fix: previously, when no cell qualified, marker[i].downsamp_loc
		//kept a stale (possibly uninitialized) value and frame.sum[] was
		//zeroed at that stale index - a potential out-of-bounds write.
		//Now the marker keeps its previous location and nothing is clobbered.
	}
}

//brightest_pix_search function
//search through the blobs for the brightest pixel
//void brightest_pix_search(unsigned char * fb, frame_info* frame, marker* marker, int num_markers, int framewidth, int downsamp, int downsamp_w, int downsamp_h){
void brightest_pix_search(unsigned char * fb, int camno, track_cam* tcam, int num_markers, int framewidth, int downsamp_r, int downsamp_c, int downsamp_w, int downsamp_h){
	//Within each marker's winning downsampled cell, scan the full-resolution
	//pixels and record the brightest one (row, col, value) on the marker.
	for(int i=0; i<num_markers; i++){
		//hoisted loop bounds: integer division already truncates, so the
		//floor() of the original was redundant; and the old code recomputed
		//these on every loop-condition evaluation.
		int loc  = tcam[camno].marker[i].downsamp_loc;
		int row0 = (loc / downsamp_c) * downsamp_h; //top row of the cell
		int col0 = (loc % downsamp_c) * downsamp_w; //left col of the cell
		tcam[camno].marker[i].br_pix_val=0; //always has to start low
		for(int row=row0; row<row0+downsamp_h; row++){
			for(int col=col0; col<col0+downsamp_w; col++){
				int v = int(fb[row*framewidth+col]); //fetch the pixel once
				if(v > tcam[camno].marker[i].br_pix_val){ //brighter than best so far?
					tcam[camno].marker[i].br_pix_row=row; //save the row
					tcam[camno].marker[i].br_pix_col=col; //save the column
					tcam[camno].marker[i].br_pix_val=v; //save the value
				}
			}
		}
	}
}

The blocking (or blobbing) and search algorithm yields the estimated location of the brightest pixel in the image. This is passed to a specialized array-growth region growing algorithm which dynamically expands a region around the suggested brightest pixel to include all pixels that are within a threshold of brightness to the brightest. The region growing algorithm then computes the center of mass from the list of pixel coordinates, which are then passed to the PD and target location routines.

void region_grow(unsigned char * src, unsigned short* dest, 
		int w, int h, int b_r, int b_c, double* c_r, double* c_c){
	//Flood-fill (4-connected) outward from the brightest pixel (b_r,b_c),
	//collecting every pixel whose brightness is within `thresh` of the seed,
	//and return the region's center of mass through *c_r / *c_c.
	//Collected pixels are zeroed in src (marks them visited, assuming the
	//seed brightness is >= thresh) and painted `fill` in the dest overlay.
	//this is sorta a random-access op - which is bad, but unavoidable here.
	unsigned short fill = 0xff00; 
	unsigned char thresh = 20 ; 
	unsigned char brightest = src[w*b_r + b_c]; 
	//neighbor offsets: down, up, right, left (same order as before).
	static const short off_r[4] = { 1, -1, 0, 0 }; 
	static const short off_c[4] = { 0, 0, 1, -1 }; 
	int n = 0; 
	g_rows[n] = b_r; 
	g_cols[n] = b_c; 
	n++; 
	//bug fix: mark the seed visited. Previously the seed was never zeroed,
	//so its neighbors re-added it and it was counted twice in the centroid.
	src[w*b_r + b_c] = 0; 
	dest[w*b_r + b_c] = fill; 
	int sta = 0; 
	int end = 0; 
	int lim = sizeof(g_rows)/sizeof(int); 
	while(n < lim && n > sta){
		//loop through all the points added last round, expanding as we go. 
		end = n; 
		for(int i=sta; i < end; i++){
			for(int d=0; d < 4; d++){
				short r = g_rows[i] + off_r[d]; 
				short c = g_cols[i] + off_c[d]; 
				if(r < 0 || r >= h || c < 0 || c >= w || n >= lim)
					continue; 
				int p = r*w + c; 
				//visited pixels were zeroed, so they fail this test. 
				if(brightest - src[p] < thresh){
					src[p] = 0; 
					dest[p] = fill; 
					g_rows[n] = r; 
					g_cols[n] = c; 
					n++; 
				}
			}
		}//end loop over past points. 
		sta = end; 
	}
	//calculate the center of mass of the collected pixels. 
	double cm_r = 0; 
	double cm_c = 0; 
	for(int i=0; i<n; i++){
		cm_r += g_rows[i]; 
		cm_c += g_cols[i]; 
	}
	cm_r /= n; 
	cm_c /= n; 
	*c_r = cm_r; 
	*c_c = cm_c; 
	//printf("point: %f %f %d \n",  cm_r, cm_c, g_frame++); 
	//mark the centroid in the overlay for visual debugging. 
	int cm_r_i = (int)cm_r; 
	int cm_c_i = (int)cm_c; 
	if(cm_c_i >= 0 && cm_c_i < w && cm_r_i >= 0 && cm_r_i < h)
		dest[cm_r_i*w + cm_c_i] = 0xffff; 
}

And that is, roughly, the entirety of the video tracking program! (Most of the rest of the code deals with the firewire bus and other less interesting details.) We conclude with a picture of the whole setup in the office.

{624}
hide / / print
ref: notes-0 tags: elektralite mm150 optical power light ME270 project date: 12-03-2008 20:39 gmt revision:5 [4] [3] [2] [1] [0] [head]

Section 1 : Reverse-engineering the light, part selection

Rather than engineering our own articulated spotlight, we elected to buy a moving-mirror DJ light; creating our own light source would simply have taken too much time, and demanded optical experience that we lack. After a brief survey, we bought an Elekralite mm150 (moving mirror, 150W bulb) DJ light, one of the cheapest on the market ($650); despite this, it is very well constructed, as you will see below. The light was originally controlled through the stage-specific DMX bus, but the possibility of directly controlling the light through this was discarded after we learned that the resolution on each axis is only 8 bits (+- 127); given the large range of the pan mirror, this is insufficient for tracking. Hence we decided to reverse engineer the light to determine the best way to directly control the mirror and shutter.
  • Overview, with dichroic color filter wheel and globos removed. We ended up putting these back in, as without them the unit does not initialize properly.
  • Another view with the shell taken off. The white ballast on the bottom powers the 150W HMI metal-halide bulb, along with a stabilization capacitor (to the left of the ballast) and starter (below the capacitor). The starter produces 2kV to initially ionize the gas in the bulb; there is no third starter electrode as in mercury vapor bulbs, as this would be a point of stress in the high-pressure (90atm) fused quartz arc tube. The bulb is on whenever the unit is plugged in, and the light output is controlled via the shutter. The top large transformer (+20V, +12V) is used to drive the display/control unit (left center) and the 6 stepper motors in the lamp, which are:
    • mirror tilt
    • mirror pan
    • shutter
    • globo selection
    • globo rotate
    • colorwheel
    • Detail of the breakout board for the 6 stepper motors, with the colorwheel stepper above it. The stepper motors are two-phase, permanent-magnet rotor types; we're not sure of the steps/revolution yet, though they all seem different. The breakout board also has attachments for the hall-effect sensors to detect orientation of the globo and color wheels.
    • The dichroic mirror color wheel. Regular filters would undoubtedly melt here - a pencil put into the beam of the HMI lamp will light on fire!
    • The globo wheel. Each globo is mounted to a gear which engages a center gear on the same axis as this plate. This center gear is driven by the globo rotate stepper motor.
    • Dichroic IR filter. Note that since it is viewed at an angle, the reflective wavelength goes up, and so you can see a slight red tint in the reflections off this. Removed this, since silicon photovoltaic cells respond to near IR energy.
    • Stepper motor controller. The three chips at the bottom are NXP 87LPC768 OTP microcontroller. Each can control two stepper motors, and each has an internal program for self-calibration using either the hall-effect sensors or hard mechanical limits on motor range. Upper chip is for the mirror (U3). These chips are controlled via one high-speed serial bus from the upper control board (the one with the LED display). The six upper chips, with heat-sinks on them, contain dual H-bridges, probably similar to the Allegro A3955 PWM microstepping motor driver. Between them are two standard 74HC04 hex inverters to drive the opposite leg of the h-bridges (each 87LPC768 has four outputs, two per motor, whereas each motor requires four inputs, two per phase. In this way, you can switch the direction of current flow through each motor phase with a single +20v supply rail.)
    • The Elekralite mm150 without the two control boards, globo wheel, or filter wheel. You can see the lamp, focus adjustment screws and reflector. Below the lamp is a 24V radial fan.
    • The Elektralite powered on, with globo and filter wheel back in place. The lamp is very, very bright!
    • Close-up of filter and globo wheels. The shutter has been removed; it fits on the shaft just above the long hex spacer. Just above that is the mechanical hard stop for the shutter.
    • The pin on the main controller that is used to control the 6 stepper axes. We tried probing it for some time, but could not directly figure out the serial protocol without a good logic analyzer (which is at home), so decided to implement our own microstepping PWM controller, and drive the inverters / H-bridges directly.
    • The shutter, after being used for a good bit. The UV and IR from the lamp (mostly the IR, I think - the shutter gets really hot!) bleaches the black anodization on this bit of aluminum. We've drilled a 1/4" hole in one side to try to get a more tightly-collimated beam.
    • The PWM waveform at the motor for the two phases (the glare at the left is from the light, which was of course on!). The other leads in the motor are simply the inverse of these via the 74HC04. For vector control like this, a 50% on/50% off PWM effectively sets the phase current to zero (the motor has sufficient leakage inductance at the 16kHz PWM frequency to keep di/dt low over multiple PWM periods). Hence, to control the motors we need something like:
      phase1 = 0.5*sin(theta) + 0.5 ; phase2 = 0.5*cos(theta) + 0.5
      , where theta is changed based on the control algorithm, and phase is scaled and used to set the PWM counter compare register. See below.
    • Microstepping control scheme based on vector control of current in the two phases of these stepper motors. As before, current will be regulated via PWM @ 16kHz, as the Elektralite engineers do, as opposed to linear current control. To generate the appropriate 6 PWM signals (mirror pan, mirror tilt, and shutter), we used a MSP430 microcontroller with 3 16-bit counters and 5+ capture-compare registers.
    • We used a MSP430F5xxx development board from TI -- see the next section. The device features sufficient timers and PWM outputs to control the stepper motors in the Elekralite. We will control motion of each using a simple up-down toggling via the parallel port to increment/decrement the motor theta variable. The microcontroller will need a look-up table or algorithm for calculating the sines and cosines of rotor angles.
    • small solar cell from Spark Fun Electronics. When placed in the beam of the light, it is possible to generate 500ma short-circuit current and 5V open-circuit voltage, but not at the same time. In a solar cell composed of a series of junctions, such as this, the total current at full voltage is limited by the minimum current from each of the junctions (they are in series!). Therefore to get maximum current you need very uniform illumination, something that is normally not difficult with the sun, but which will be difficult in this application. To avoid this problem we will try to procure higher efficiency single-junction GaAs concentrator cells, which can be up to ~40% efficient (as compared to this one, which is ~15% efficient).

{35}
hide / / print
ref: bookmark-0 tags: metal_halide projector light CRI Venture Osram Phillips date: 0-0-2007 0:0 revision:0 [head]

Overview: a projector light should have good luminous efficiency, have a long life, and most importantly have plenty of energy in the red region of the spectrum. most metal halides have yellow/green lines and blue lines, few have good red lines.

http://www.osram.no/brosjyrer/english/K01KAP5_en.pdf in 1000 watt, the Osram Powerstar HQI-TS 1000/d/s looks the best: CRI > 90, 5900K color temperature. Unfortunately, I cannot seem to find any american places to buy this bulb, nor can i determine its average life. It can be bought, at a price, from http://www.svetila.com/eProdaja/product_info.php/products_id/442 { n.b. the osram HMI bulbs are no good-the lifetime is too short}

In 400 watt, the Eye Clean Arc MT400D/BUD looks quite good, with a CRI of 90, 6500K color temp. http://www.eyelighting.com/cleanarc.html. EYE also has a ceraarc line, but the 400w bulb is not yet in production (and it has a lower color temperature, 4000K). Can be bought from http://www.businesslights.com/ (N.B. they have spectral charts for many of the lights!)

  • I've also seen reference to the Phillips mastercolor line: http://www.nam.lighting.philips.com/us/ecatalog/hid/pdf/p-5497c.pdf
    • these are ceramic HPS white replacements ('retro-white'). 85CRI, 4000K color temperature, reasonably efficient over the life of the bulb.
  • Ushio
  • Venture lighting has a 400W naturalWhite e-lamp (5000k, 90+ CRI). For use with both pulse-start and the electronic ballasts that they sell.

and FYI, the electrodeless bulbs are made by Osram and are called "ICETRON". They are rather expensive, but last 1e5 hours (!). Typical output is 80 lumens/watt

more things of interest:

{26}
hide / / print
ref: notes-0 tags: architecture aesthetic light date: 0-0-2006 0:0 revision:0 [head]

http://www.thomaslockehobbs.com/ -- interesting photoblog of a globetrotter & laconic harvard intellectual

http://www.uni-weimar.de/architektur/InfAR/lehre/Entwurf/Patterns/107/ca_107.html Modern buildings are often shaped with no concern for natural light - they depend almost entirely on artificial light. But buildings which displace natural light as the major source of illumination are not fit places to spend the day.