Friday 13 February 2009

Genetic Algorithm

A simple genetic algorithm, following Inman Harvey's microbial genetic algorithm.

Problem:

You have 10 cards numbered from 1 to 10. You have to choose a way of dividing them into 2 piles, so that the cards in Pile_0 *sum* to a number as close as possible to 36, and the remaining cards in Pile_1 *multiply* to a number as close as possible to 360.


Solution:


// CardProblembyGA.cpp : Defines the entry point for the console application.
#include "stdafx.h"
//#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <conio.h>

#define POP 1000
#define LEN 10
#define REC 0.5
#define MUT 0.1
#define END 1000

#define TARGET_SUM 36
#define TARGET_PROD 360

#define RANDOM_NUM ((float)rand()/(RAND_MAX+1))

void init_pop(char gene[POP][LEN]);
double evaluate(char gene[LEN]);
void display(char gene[LEN]);

// Entry point: runs microbial-GA tournaments until an ideal card split
// is found (combined deviation == 0) or END tournaments have elapsed.
int _tmain(int argc, _TCHAR* argv[])
{
	char gene[POP][LEN];
	int Winner, Loser, temp1, temp2;
	bool WinnerFound = false;

	srand((int)time(NULL));
	init_pop(gene);
	for(int t=0;t<END && !WinnerFound;t++){
		// Pick two random contestants for this tournament.
		temp1 = (int) (POP * RANDOM_NUM);
		temp2 = (int) (POP * RANDOM_NUM);
		// Lower combined deviation is better: that genome wins.
		if(evaluate(gene[temp1]) < evaluate(gene[temp2])){
			Winner = temp1;
			Loser = temp2;
		}
		else {
			Winner = temp2;
			Loser = temp1;
		}

		// Infect the loser with the winner's genes (recombination with
		// probability REC per locus), then mutate with probability MUT.
		for(int i=0;i<LEN;i++){
			if(RANDOM_NUM < REC)
				gene[Loser][i] = gene[Winner][i];
			if(RANDOM_NUM < MUT)
				gene[Loser][i] = gene[Loser][i]=='1' ? '0':'1';

			if(evaluate(gene[Loser]) == 0.0){
				printf("\nIdeal condition found on %d tournaments...", t);
				// BUGFIX: this flag was commented out, so the search kept
				// running (and could re-print solutions) after success.
				WinnerFound = true;
				display(gene[Loser]);
				break;
			}
		}
	}
	// BUGFIX: failure message was commented out; report when the budget
	// of END tournaments is exhausted without an ideal split.
	if(!WinnerFound)
		printf("Unable to find ideal condition on %d tournaments...", END);
	getche();
	return 0;
}

// Fill the whole population with uniformly random '0'/'1' gene strings.
void init_pop(char gene[POP][LEN])
{
	for(int row = 0; row < POP; row++){
		for(int col = 0; col < LEN; col++){
			gene[row][col] = (RANDOM_NUM > 0.5f) ? '1' : '0';
		}
	}
}

// Fitness: combined relative deviation of the sum pile from TARGET_SUM
// and the product pile from TARGET_PROD. 0.0 means an ideal split;
// smaller is better.
double evaluate(char gene[LEN])
{
	int sum = 0, prod = 1;
	double sum_deviation, prod_deviation, combined_deviation;

	for(int i=0;i<LEN;i++){
		if(gene[i]=='1')
			prod *=(i+1);   /* '1' => card i+1 belongs to the product pile */
		else
			sum +=(i+1);    /* '0' => card i+1 belongs to the sum pile */
	}
	sum_deviation = ((double)(sum-TARGET_SUM))/TARGET_SUM;
	prod_deviation = ((double)(prod-TARGET_PROD))/TARGET_PROD;
	// BUGFIX: abs() takes an int, so the double deviations were truncated
	// toward zero — any |deviation| < 1 scored as a perfect 0.0. Use the
	// floating-point fabs() from <math.h> instead.
	combined_deviation = fabs(sum_deviation) + fabs(prod_deviation);
	return combined_deviation;
}

// Print the solution: cards at '1' loci form the product pile, cards at
// '0' loci form the sum pile. Card value is locus index + 1.
// (Removed the unused local variable `x` from the original.)
void display(char gene[LEN])
{
	printf("\n Product pile elements:");
	for(int i=0;i<LEN;i++){
		if(gene[i]=='1')
			printf(" %d",i+1);
	}
	printf("\n Sum pile elements:");
	for(int i=0;i<LEN;i++){
		if(gene[i]=='0')
			printf(" %d",i+1);
	}
}

Single Layer Perceptron

Below is the code written in C++ to train the single layer perceptron (single layer network) for 2-class classification.

// SingleLayerPerceptron.cpp : Defines the entry point for the console application.
// Single Layer Perceptron training example code
// Author: Rudra P.K. Poudel

#include "stdafx.h"
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<float.h>
#include<conio.h>

#define NN 3 // Number of neurons (2 Inputs + 1 bias )
#define LEARNING_RATE 0.1 // Learning rate
#define INPUTS_COUNT 4 // Number of input patterns i.e. training sets
#define MAX_TRAIN_ITERATION 10000 //Maximum nos of iteration to train the networks

#define PRINT_OPTION 1
//Enum flag to control the output print
enum PrintOptions
{
poNone = 0, //Dont print any output
poTraining = 1 // Printing whole training process
};

//Function decleration
int PerceptronOutput(const double weights[NN], const int inputs[NN]);
double NodeOutput(const double weights[NN], const int inputs[NN]);

//Main entry point of the function
//It will train the all possible target patterns for NN (number of nodes) inputs for 2-class problem
//This program training single layer perceptron for the XAND operation
//Main entry point of the program
//Trains a single-layer perceptron (2 inputs + bias) for a 2-class
//problem with the perceptron learning rule, optionally logging every
//training step to Training_Function.dat.
int _tmain(int argc, _TCHAR* argv[])
{
	int i,j, iterations;
	double totalError, singleError, output;
	char filename[25];
	FILE *pFile = NULL;   // BUGFIX: stays NULL when logging is disabled

	//Input patterns, last element is always 1 as it is the bias input
	int inputs[][NN]= {
		{0,0,1},
		{0,1,1},
		{1,0,1},
		{1,1,1}
	};

	//Target class for each input pattern, encoded as +1 / -1
	int targets[INPUTS_COUNT] = {1,1,1,-1};

	if(PRINT_OPTION==poTraining){
		sprintf(filename,"Training_Function.dat");
		pFile = fopen(filename,"w");
		// BUGFIX: the original wrote through pFile without checking that
		// fopen() succeeded.
		if(pFile==NULL){
			printf("Unable to open %s for writing.\n", filename);
			return 1;
		}
	}

	double weights[]= {0,0,0};
	iterations=0;
	//Train until every pattern is classified correctly or the
	//iteration budget is exhausted.
	do{
		totalError=0;
		for(i=0;i<INPUTS_COUNT;i++){
			output = PerceptronOutput(weights,inputs[i]);
			singleError = targets[i] - output;

			if(PRINT_OPTION==poTraining){
				//Printing input values
				for(j=0;j<NN;j++){
					fprintf(pFile,"%d\t",inputs[i][j]);
				}
				//Printing weights
				for(j=0;j<NN;j++){
					fprintf(pFile,"%f\t",weights[j]);
				}
				//Printing target output
				fprintf(pFile,"%d\t",targets[i]);
				//Printing node output (pre-threshold weighted sum)
				fprintf(pFile,"%f\t",NodeOutput(weights,inputs[i]));
				//Printing Perceptron output (thresholded)
				fprintf(pFile,"%f\t",output);
				//Printing error
				fprintf(pFile,"%f\t",singleError);
				//Printing adjustment factor
				fprintf(pFile,"%f\t", ((double)LEARNING_RATE) * singleError);
			}
			//Perceptron rule: w += eta * x * (target - output),
			//applied only on misclassification.
			if(singleError!=0){
				for(j=0;j<NN;j++){
					weights[j] = weights[j] + ((double)LEARNING_RATE) * inputs[i][j] * singleError;
				}
			}
			if(PRINT_OPTION==poTraining){
				//Printing new weights
				for(j=0;j<NN;j++){
					fprintf(pFile,"%f\t",weights[j]);
				}
				fprintf(pFile,"\n");
			}
			totalError += fabs(singleError);
		}
		iterations++;
		printf("Iterations: %d\tTotal Error= %f\n",iterations,totalError);
	}while(totalError!=0 && iterations<MAX_TRAIN_ITERATION);
	//END- While loop to train one function

	if(PRINT_OPTION==poTraining)
		fclose(pFile);

	printf("\n\nLearned weights:\n");
	for(j=0;j<NN;j++){
		printf("%f\t",weights[j]);
	}
	printf("\n\nPress any key to close the program...");
	getche();
	return 0;
}

//Function Name: PerceptronOutput
//Input: Weights of the connections and inputs to the node
//Putput: 1 if <NodeOput> is >=0 else -1
//Function Name: PerceptronOutput
//Input: Weights of the connections and inputs to the node
//Output: 1 if the weighted sum from NodeOutput is >= 0, otherwise -1
//(hard threshold activation)
int PerceptronOutput(const double weights[NN], const int inputs[NN]){
	return (NodeOutput(weights,inputs) >= 0) ? 1 : -1;
}

//Function Name: NodeOutput
//Input: Weights of the connections and inputs to the node
//Putput: Give sumation of weight and input sensor's product
//Function Name: NodeOutput
//Input: Weights of the connections and inputs to the node
//Output: Weighted sum (dot product) of the weight and input vectors
double NodeOutput(const double weights[NN], const int inputs[NN]){
	double acc = 0.0;
	int idx;

	for(idx = 0; idx < NN; idx++){
		acc += weights[idx] * inputs[idx];
	}

	return acc;
}