VNS Institute of Technology
Department of Computer Science & Engineering
(AFFILIATED TO RGTU)
BHOPAL (M.P.)
2010-2011
2. WAP to implement Breadth First Search algorithm in C.
#include <stdio.h>
#include <stdlib.h>
#include <conio.h>
#include <alloc.h>
#define TRUE 1
#define FALSE 0
#define MAX 8
struct node
{
int data ;
struct node *next ;
};
int visited[MAX] ;
int q[MAX] ;
int front, rear ;
struct node * getnode_write ( int val ) ;
void bfs ( int v, struct node **p ) ;
void addqueue ( int vertex ) ;
int deletequeue( ) ;
int isempty( ) ;
void del ( struct node *n ) ;
void main( )
{
struct node *arr[MAX] ;
struct node *v1, *v2, *v3, *v4 ;
int i ;
clrscr( ) ;
v1 = getnode_write ( 2 ) ;
arr[0] = v1 ;
v1 -> next = v2 = getnode_write ( 3 ) ;
v2 -> next = NULL ;
v1 = getnode_write ( 1 ) ;
arr[1] = v1 ;
v1 -> next = v2 = getnode_write ( 4 ) ;
v2 -> next = v3 = getnode_write ( 5 ) ;
v3 -> next = NULL ;
v1 = getnode_write ( 1 ) ;
arr[2] = v1 ;
v1 -> next = v2 = getnode_write ( 6 ) ;
v2 -> next = v3 = getnode_write ( 7 ) ;
v3 -> next = NULL ;
v1 = getnode_write ( 2 ) ;
arr[3] = v1 ;
v1 -> next = v2 = getnode_write ( 8 ) ;
v2 -> next = NULL ;
v1 = getnode_write ( 2 ) ;
arr[4] = v1 ;
v1 -> next = v2 = getnode_write ( 8 ) ;
v2 -> next = NULL ;
v1 = getnode_write ( 3 ) ;
arr[5] = v1 ;
v1 -> next = v2 = getnode_write ( 8 ) ;
v2 -> next = NULL ;
v1 = getnode_write ( 3 ) ;
arr[6] = v1 ;
v1 -> next = v2 = getnode_write ( 8 ) ;
v2 -> next = NULL ;
v1 = getnode_write ( 4 ) ;
arr[7] = v1 ;
v1 -> next = v2 = getnode_write ( 5 ) ;
v2 -> next = v3 = getnode_write ( 6 ) ;
v3 -> next = v4 = getnode_write ( 7 ) ;
v4 -> next = NULL ;
front = rear = -1 ;
bfs ( 1, arr ) ;
/* free the adjacency lists */
for ( i = 0 ; i < MAX ; i++ )
del ( arr[i] ) ;
getch( ) ;
}
void bfs ( int v, struct node **p )
{
struct node *u ;
visited[v - 1] = TRUE ;
printf ( "%d\t", v ) ;
addqueue ( v ) ;
while ( isempty( ) == FALSE )
{
v = deletequeue( ) ;
u = * ( p + v - 1 ) ;
while ( u != NULL )
{
if ( visited [ u -> data - 1 ] == FALSE )
{
addqueue ( u -> data ) ;
visited [ u -> data - 1 ] = TRUE ;
printf ( "%d\t", u -> data ) ;
}
u = u -> next ;
}
}
}
struct node * getnode_write ( int val )
{
struct node *newnode ;
newnode = ( struct node * ) malloc ( sizeof ( struct node ) ) ;
newnode -> data = val ;
newnode -> next = NULL ;
return newnode ;
}
void addqueue ( int vertex )
{
if ( rear == MAX - 1 )
{
printf ( "\nQueue Overflow." ) ;
exit ( 1 ) ;
}
rear++ ;
q[rear] = vertex ;
if ( front == -1 )
front = 0 ;
}
int deletequeue( )
{
int data ;
if ( front == -1 )
{
printf ( "\nQueue Underflow." ) ;
exit ( 1 ) ;
}
data = q[front] ;
if ( front == rear )
front = rear = -1 ;
else
front++ ;
return data ;
}
int isempty( )
{
if ( front == -1 )
return TRUE ;
return FALSE ;
}
void del ( struct node *n )
{
struct node *temp ;
while ( n != NULL )
{
temp = n -> next ;
free ( n ) ;
n = temp ;
}
}
WAP to implement level order traversal of a binary search tree in C.
#include <stdio.h>
#include <stdlib.h>
#include <conio.h>
typedef struct node
{
int value;
struct node *left;
struct node *right;
} mynode;
mynode *root;
void add_node(int value);
void levelOrderTraversal(mynode *root);
void main()
{
root = NULL;
add_node(5);
add_node(1);
add_node(-20);
add_node(100);
add_node(23);
add_node(67);
add_node(13);
printf("\nLevel-order traversal : ");
levelOrderTraversal(root);
getch();
}
void add_node(int value)
{
mynode *temp, *prev, *cur;
temp = (mynode *) malloc(sizeof(mynode));
temp->value = value;
temp->right = NULL;
temp->left = NULL;
if(root == NULL) {
printf("\nCreating the root..\n");
root = temp;
return;
}
prev = NULL;
cur = root;
while(cur != NULL) {
prev = cur;
//cur = (value < cur->value) ? cur->left:cur->right;
if(value < cur->value) {
cur = cur->left;
} else {
cur = cur->right;
}
}
/* attach the new node to the correct side of its parent */
if(value < prev->value)
prev->left = temp;
else
prev->right = temp;
}
void levelOrderTraversal(mynode *root)
{
mynode *queue[100] = {NULL};
int size = 0, queue_pointer = 0;
while(root) {
printf("[%d] ", root->value);
if(root->left) {
queue[size++] = root->left;
}
if(root->right) {
queue[size++] = root->right;
}
root = queue[queue_pointer++];
}
}
A* Search Algorithm (pseudocode)
PriorityQueue OpenList
List ClosedList
startNode.g = 0
startNode.h = EstimateCostToEndNode(startNode)
startNode.f = startNode.g + startNode.h
startNode.parent = null
OpenList.Insert(startNode)
while OpenList is not empty
    node = OpenList.RemoveNodeWithLowestF()
    if node == endNode
        return TRUE
    for each neighbour of node
        update neighbour.g, f, and parent, and insert neighbour into OpenList,
        if this path to it is cheaper than the best one seen so far
    ClosedList.Insert(node)
return FALSE
N-Queen's Problem
#include <iostream.h>
#include <ctype.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <conio.h>
int no_of_queens;
const char queen = 'Q'; /* symbol used to mark a queen on the board */
void check(int, int, char [100][100]);
void print(char [100][100]);
int main(void)
{
int row, col, i;
char board[100][100], response;
clrscr();
printf("
");
printf("
Enter the number of queens : ");
scanf("%d", &no_of_queens);
if(no_of_queens > 23)
{
printf("
Want to continue(Y/N)?");
fflush(stdin);
scanf("%c", &response);
if(toupper(response) == 'N')
return (0);
}
else if(no_of_queens < 3)
{
printf("The number of Queen must be greater than 3.");
getch();
return (0);
}
clrscr();
printf("M/c in work ! Please Wait...");
// This for-loop is used for checking all the columns of row 0 only...
_setcursortype(_NOCURSOR);
for(col = 0; col < no_of_queens; col++)
{
memset(board, '-', sizeof(board));
check( 0, col, board );
}
clrscr();
printf("Thank you for seeing this program through.");
getch();
return (0);
}
void check(int r, int c, char board[100][100])
{
int i, j;
// Vertical check...
for(i = 0; i < r; i++)
{
if ( board[i][c] == queen)
return;
}
// Horizontal check...
for(j = 0; j < c; j++)
{
if ( board[r][j] == queen)
return;
}
// Left-Diagonal check...
i = r; j = c;
do
{
if ( board[i][j] == queen )
return;
i--; j--;
}
while( i >= 0 && j >= 0 );
// Right-Diagonal check...
i = r; j = c;
do
{
if ( board[i][j] == queen )
return;
i--; j++;
}
while( i >= 0 && j < no_of_queens );
// All checks passed, so place a queen at (r, c)...
board[r][c] = queen;
r++;
if(r == no_of_queens)
print(board);
else
{
// This for-loop is used for checking all the columns for each row
// starting from 1 upto the end...
for(int p = 0; p < no_of_queens; p++)
check(r, p, board);
}
// Backtrack : clear the queen placed in this row...
for(int h = 0; h < no_of_queens; h++)
board[r - 1][h] = '-';
}
void print(char board[100][100])
{
int i, j;
for(i = 0; i < no_of_queens; i++)
{
for(j = 0; j < no_of_queens; j++)
printf("%c ", board[i][j]);
printf("\n");
}
gotoxy(62, 1);
printf("Press E to exit.");
textcolor(7);
if(toupper(getch()) == 'E')
exit(0);
}
Implementation
If you can't find a matrix library for your implementation language, you can
write a simple library yourself. Since neural nets do not require matrix inverses or
long chains of matrix products, you need not worry (much) about numerical stability;
the implementation is straightforward. You will need the following operations:
matrix transposition;
matrix addition;
matrix multiplication with a scalar;
ordinary matrix multiplication;
Hadamard multiplication (component-wise multiplication);
Kronecker multiplication (only needed between row and column
vectors); and
horizontal matrix concatenation.
The first few operations are standard matrix operations, but you might be less
familiar with the last three. (Also check out the Wikipedia article on matrix
multiplication – it covers all the types of multiplication mentioned here.)
The Kronecker product of a row vector x and a column vector y is defined as the
matrix whose components are given by:
Z[i][j] = y[i] * x[j]
It is possible to define the product for arbitrary matrices, but we don't need it.
The horizontal concatenation combines two matrices with the same number of rows.
For example, a matrix A with X_width columns and a matrix B with Y_width columns
will be concatenated to form a new matrix C whose components are given by:
if j < X_width
    Z[i][j] = X[i][j]
else
    Z[i][j] = Y[i][j - X_width]
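The following is a minimal C sketch of the three less-standard operations, assuming
matrices are stored as row-major arrays of doubles with explicit dimensions; the
function names and layout are illustrative, not from any particular library.

/* Hadamard (component-wise) product: Z = X .* Y, where all three
   matrices are rows x cols, stored row-major. */
void hadamard(const double *X, const double *Y, double *Z, int rows, int cols)
{
    int i;
    for (i = 0; i < rows * cols; i++)
        Z[i] = X[i] * Y[i];
}

/* Kronecker product of a column vector y (m x 1) and a row vector x (1 x n):
   Z[i][j] = y[i] * x[j], so Z is m x n. */
void kronecker(const double *y, int m, const double *x, int n, double *Z)
{
    int i, j;
    for (i = 0; i < m; i++)
        for (j = 0; j < n; j++)
            Z[i * n + j] = y[i] * x[j];
}

/* Horizontal concatenation of X (rows x xw) and Y (rows x yw) into
   Z (rows x (xw + yw)). */
void hconcat(const double *X, int xw, const double *Y, int yw,
             double *Z, int rows)
{
    int i, j;
    for (i = 0; i < rows; i++) {
        for (j = 0; j < xw; j++)
            Z[i * (xw + yw) + j] = X[i * xw + j];
        for (j = 0; j < yw; j++)
            Z[i * (xw + yw) + xw + j] = Y[i * yw + j];
    }
}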
If no graph libraries are available, simply write a function that will output a tab-
separated list of the input and output sequences to plot. You can then load or paste
this into your favourite spreadsheet program to make the necessary plots.
2. Implement Output and Class conversion functions
This is very simple: implement a function that converts an output matrix to a class
number vector, and another that converts a class number vector back to an output
matrix.
For example, the output_to_class function will take the following matrix
1 0 0
0 1 0
0 0 1
1 0 0
0 0 1
and return the vector of class numbers (1 2 3 1 3). (The class_to_output function
will convert that vector back to the matrix above.)
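A minimal C sketch of both conversion functions, assuming row-major double
matrices and 1-based class numbers (both assumptions, not fixed by the text):

/* output_to_class: the class of each row is the (1-based) index of its
   largest component. */
void output_to_class(const double *Z, int rows, int cols, int *classes)
{
    int i, j;
    for (i = 0; i < rows; i++) {
        int best = 0;
        for (j = 1; j < cols; j++)
            if (Z[i * cols + j] > Z[i * cols + best])
                best = j;
        classes[i] = best + 1;
    }
}

/* class_to_output: build one row per class number, with a 1 in the
   class position and 0 elsewhere. */
void class_to_output(const int *classes, int rows, int cols, double *Z)
{
    int i, j;
    for (i = 0; i < rows; i++)
        for (j = 0; j < cols; j++)
            Z[i * cols + j] = (classes[i] == j + 1) ? 1.0 : 0.0;
}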
For this tutorial you can use the following three files:
iris_training.dat
iris_validation.dat
iris_test.dat
These three files contain samples from the UCI iris dataset, a simple and quite
famous dataset. In each file, samples are contained in rows. Each row has seven
entries, separated by tabs. The first four entries are features of irises (sepal length,
sepal width, petal length, and petal width); the last three are outputs denoting the
species of iris (setosa, versicolor, and virginica). I have preprocessed the values a bit
to get them in the appropriate ranges.
You must read in the data so that you can treat the inputs of each set as a single
matrix; similarly for the outputs.
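A possible C sketch for reading one such file, assuming row-major double arrays
and the seven-entries-per-row layout described above; the function name is
illustrative:

#include <stdio.h>

#define NUM_FEATURES 4
#define NUM_OUTPUTS 3

/* Reads up to max_rows tab-separated rows from path into an input matrix
   (max_rows x 4) and an output matrix (max_rows x 3). Returns the number
   of complete rows read, or -1 if the file cannot be opened. */
int load_set(const char *path, double *inputs, double *outputs, int max_rows)
{
    FILE *fp = fopen(path, "r");
    int row = 0, j;
    if (fp == NULL)
        return -1;
    while (row < max_rows) {
        for (j = 0; j < NUM_FEATURES; j++)
            if (fscanf(fp, "%lf", &inputs[row * NUM_FEATURES + j]) != 1)
                goto done;
        for (j = 0; j < NUM_OUTPUTS; j++)
            if (fscanf(fp, "%lf", &outputs[row * NUM_OUTPUTS + j]) != 1)
                goto done;
        row++;
    }
done:
    fclose(fp);
    return row;
}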
The activation function must take in a matrix X, and return a matrix Y. Y is computed
by applying a function component-wise to X. For now, use the hyperbolic tangent
function:
y = tanh(x)
The activation function derivative must similarly take in a matrix X, and return a
matrix Y. Y is computed by applying the derivative of the activation component-wise
to X. The derivative of the function above is:
y' = 1 - tanh^2(x)
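A minimal C sketch of both functions, assuming row-major double arrays:

#include <math.h>

/* Y = tanh(X), applied component-wise. */
void activation(const double *X, double *Y, int rows, int cols)
{
    int i;
    for (i = 0; i < rows * cols; i++)
        Y[i] = tanh(X[i]);
}

/* Y = 1 - tanh(X)^2, applied component-wise. */
void activation_derivative(const double *X, double *Y, int rows, int cols)
{
    int i;
    double t;
    for (i = 0; i < rows * cols; i++) {
        t = tanh(X[i]);
        Y[i] = 1.0 - t * t;
    }
}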
The feed-forward function must take as arguments an input matrix, a weight matrix,
and a bias node matrix, and return the output matrix Z (and the net matrix, which
you can ignore for now).
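A minimal single-layer C sketch, assuming the bias column is multiplied against an
extra final row of bias weights in W; this weight layout is an assumption, not
prescribed by the text:

#include <math.h>

/* Computes net = [X bias] * W and Z = tanh(net), where X is
   rows x in_width, bias is a rows x 1 column of 1s, and W is
   (in_width + 1) x out_width with the bias weights in its last row. */
void feed_forward(const double *X, int rows, int in_width,
                  const double *bias, const double *W, int out_width,
                  double *net, double *Z)
{
    int i, j, k;
    for (i = 0; i < rows; i++) {
        for (j = 0; j < out_width; j++) {
            double sum = 0.0;
            for (k = 0; k < in_width; k++)
                sum += X[i * in_width + k] * W[k * out_width + j];
            sum += bias[i] * W[in_width * out_width + j];
            net[i * out_width + j] = sum;
            Z[i * out_width + j] = tanh(sum);
        }
    }
}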
This function must take in a maximum weight, a width and height, and return a
matrix of the given width and height, randomly initialised in the range
[-max_weight, max_weight].
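A possible C sketch using the standard rand() generator; any symmetric
distribution over the interval would do:

#include <stdlib.h>

/* Fills a pre-allocated rows x cols matrix with values uniformly
   distributed in [-max_weight, max_weight]. */
void initialise_weights(double *W, int rows, int cols, double max_weight)
{
    int i;
    for (i = 0; i < rows * cols; i++) {
        double u = (double) rand() / (double) RAND_MAX;  /* in [0, 1] */
        W[i] = (2.0 * u - 1.0) * max_weight;             /* in [-max, max] */
    }
}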
The network error function must take in:
an input matrix,
a weight matrix,
a target output matrix,
a target class matrix,
a bias matrix.
The function must return the error e, and the classification error c.
To compute these, first compute the output matrix Z using the feed-forward function
(you can ignore the net matrix).
Now subtract the target output matrix from the output matrix, square the
components, add them together, and normalise by the sample count:
e = sum_all_components(square(Z - target_outputs)) / sample_count
Next, use the conversion function to get the predicted classes:
classes = output_to_class(Z)
Count the number of classes that corresponds with the target classes, and divide by
the number of samples to normalise:
c = sum_all_components(classes != target_classes)/sample_count
(Here, our inequality returns a matrix of 0s and 1s, with 1s in positions where the
corresponding components in classes and target_classes are not equal.)
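A hedged C sketch of the two measurements, assuming Z and the classes have
already been computed with the feed-forward and output_to_class sketches above:

/* Returns e (normalised sum of squared output errors) and stores the
   classification error (fraction of misclassified samples) in *c. */
double network_error(const double *Z, const double *T, int rows, int cols,
                     const int *classes, const int *target_classes, double *c)
{
    int i, wrong = 0;
    double d, e = 0.0;
    for (i = 0; i < rows * cols; i++) {
        d = Z[i] - T[i];
        e += d * d;
    }
    e /= (double) rows;                    /* normalise by sample count */
    for (i = 0; i < rows; i++)
        if (classes[i] != target_classes[i])
            wrong++;
    *c = (double) wrong / (double) rows;
    return e;
}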
The weight update function must take in:
an input matrix,
a weight matrix,
a learning rate (eta, as in the Greek letter),
and a bias vector.
The function must return an updated weight matrix. For now, return W as is.
The training function should take in three sets, the training_set, validation_set, and
test_set. Implement a way to limit the maximum number of samples that will
actually be used for training (you can also do this in the main program described in
the next section). This is very helpful for debugging purposes (especially if you plan
to later replace the backpropagation algorithm with something a little faster – and
more complicated).
The function should return a weight matrix, and error values as floats.
The function should initialise a weight matrix using initialise_weights. For now, use a
max_weight of 1/2.
The function should also construct three bias vectors bias_train, bias_validate,
and bias_test. Each must contain only 1s, with as many rows as there are inputs in
the training, validation, and test sets respectively.
Implement a while loop that stops after 500 iterations. (We will change the while
condition later to something else, so do not use a for loop).
Inside the loop, call the backpropagation algorithm. Use the training set inputs, the
weights, (for now) a fixed learning rate of 0.1, and bias vector bias_train. Assign the
result to weights.
Still inside the loop, call the network error function three times: one time for each of
the training, validation, and test sets. Use the weight matrix, and the appropriate bias
vector. Wrap these calls in an if-statement that tests for a value plot_graphs. (If your
language supports it, you can use conditional compilation on the value of
plot_graphs).
Store the errors in six arrays (error_train, classification_error_train, etc.), with the
current epoch number as index.
After the loop, plot the six error arrays as a function of epoch number. Wrap this in
an if-statement (or conditional compilation statement) that tests for the value
plot_graphs.
Call the network error function again, on all three sets as before.
The program should load in the sets (using the load_sets function), and pass these to
the training algorithm.
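A C skeleton of the program just described, with the "for now" placeholders the
text asks for; only the three error arrays (not all six) are shown to keep it short,
plot_graphs is a plain runtime flag, and all names are illustrative:

#include <stdio.h>

#define MAX_EPOCHS 500

/* Placeholder weight update: the text says "for now, return W as is". */
static void update_weights(double *W, int n) { (void) W; (void) n; }

/* Placeholder error measurement, to be replaced by the real network
   error function once it is wired in. */
static double net_error(void) { return 0.0; }

int main(void)
{
    double error_train[MAX_EPOCHS], error_validate[MAX_EPOCHS],
           error_test[MAX_EPOCHS];
    double weights[1] = { 0.0 };  /* stands in for the real weight matrix */
    int plot_graphs = 1;
    int epoch = 0;

    while (epoch < MAX_EPOCHS) {  /* a while loop, as the text suggests */
        update_weights(weights, 1);
        if (plot_graphs) {
            error_train[epoch] = net_error();
            error_validate[epoch] = net_error();
            error_test[epoch] = net_error();
        }
        epoch++;
    }
    /* Tab-separated output that can be pasted into a spreadsheet to plot. */
    if (plot_graphs) {
        for (epoch = 0; epoch < MAX_EPOCHS; epoch++)
            printf("%d\t%f\t%f\t%f\n", epoch, error_train[epoch],
                   error_validate[epoch], error_test[epoch]);
    }
    return 0;
}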
The important thing is that everything should run. You should see your error plots; at
this stage they should be straight, horizontal lines. Because of the random weight
initialisation, we cannot predict where these lines will lie, so do not be alarmed if
yours lie at a different height, as long as they are straight and horizontal.
WAP to read an image file in C.
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <conio.h>
void main( )
{
FILE *fp;
unsigned char i[100][100];
int m,n;
clrscr();
if((fp = fopen("lenna.dat","rb")) == NULL)
{
printf("Just cannot open the specified file.\n");
exit(1);
}
else
{
for(m=0;m<100;m++)
{
for(n=0;n<100;n++)
{
fscanf(fp,"%c",&i[m][n]);
}
}
fclose(fp);
getch();
}
}
/******************************************************************************
DECLARATIONS
******************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#define FALSE 0
#define TRUE 1
#define NOT !
#define AND &&
#define OR ||
#define LO -1
#define HI +1
#define BIAS 1
/******************************************************************************
RANDOMS DRAWN FROM DISTRIBUTIONS
******************************************************************************/
void InitializeRandoms()
{
srand(4711);
}
/******************************************************************************
APPLICATION-SPECIFIC CODE
******************************************************************************/
#define NUM_DATA 10
#define X 5
#define Y 7
#define N (X * Y)
#define M 10
{ " O ",
" OO ",
"O O ",
" O ",
" O ",
" O ",
" O " },
{ " O ",
" OO ",
" O O ",
"O O ",
"OOOOO",
" O ",
" O " },
{ "OOOOO",
"O ",
"O ",
"OOOO ",
" O",
"O O",
" OOO " },
{ "OOOOO",
" O",
" O",
" O ",
" O ",
" O ",
"O " },
INT Output[NUM_DATA][M] =
{ {HI, LO, LO, LO, LO, LO, LO, LO, LO, LO},
{LO, HI, LO, LO, LO, LO, LO, LO, LO, LO},
{LO, LO, HI, LO, LO, LO, LO, LO, LO, LO},
{LO, LO, LO, HI, LO, LO, LO, LO, LO, LO},
{LO, LO, LO, LO, HI, LO, LO, LO, LO, LO},
{LO, LO, LO, LO, LO, HI, LO, LO, LO, LO},
{LO, LO, LO, LO, LO, LO, HI, LO, LO, LO},
{LO, LO, LO, LO, LO, LO, LO, HI, LO, LO},
{LO, LO, LO, LO, LO, LO, LO, LO, HI, LO},
{LO, LO, LO, LO, LO, LO, LO, LO, LO, HI} };
FILE* f;
void InitializeApplication(NET* Net)
{
Net->Eta = 0.001;
Net->Epsilon = 0.0001;
f = fopen("ADALINE.txt", "w"); /* output file name assumed */
}
void WriteOutput(NET* Net, INT* Output)
{
INT i, Count, Index;
Count = 0;
for (i=0; i<M; i++) {
if (Output[i] == HI) {
Count++;
Index = i;
}
}
if (Count == 1)
fprintf(f, "%i\n", Index);
else
fprintf(f, "%s\n", "invalid");
}
/******************************************************************************
INITIALIZATION
******************************************************************************/
void GenerateNetwork(NET* Net)
{
/* allocation of the layer structures is elided in this listing */
Net->InputLayer->Units = N;
Net->InputLayer->Output = (INT*) calloc(N+1, sizeof(INT));
Net->InputLayer->Output[0] = BIAS;
Net->OutputLayer->Units = M;
Net->OutputLayer->Activation = (REAL*) calloc(M+1, sizeof(REAL));
Net->OutputLayer->Output = (INT*) calloc(M+1, sizeof(INT));
Net->OutputLayer->Error = (REAL*) calloc(M+1, sizeof(REAL));
Net->OutputLayer->Weight = (REAL**) calloc(M+1, sizeof(REAL*));
Net->Eta = 0.1;
Net->Epsilon = 0.01;
}
/******************************************************************************
PROPAGATING SIGNALS
******************************************************************************/
/******************************************************************************
ADJUSTING WEIGHTS
******************************************************************************/
void ComputeOutputError(NET* Net, INT* Target)
{
INT i;
REAL Err;
Net->Error = 0;
for (i=1; i<=Net->OutputLayer->Units; i++) {
Err = Target[i-1] - Net->OutputLayer->Activation[i];
Net->OutputLayer->Error[i] = Err;
Net->Error += 0.5 * sqr(Err);
}
}
/******************************************************************************
SIMULATING THE NET
******************************************************************************/
void SimulateNet(NET* Net, INT* Input, INT* Target, BOOL Training, BOOL Protocoling)
{
INT Output[M];
ComputeOutputError(Net, Target);
if (Training)
AdjustWeights(Net);
}
/******************************************************************************
MAIN
******************************************************************************/
void main()
{
NET Net;
REAL Error;
BOOL Stop;
INT n,m;
InitializeRandoms();
GenerateNetwork(&Net);
RandomWeights(&Net);
InitializeApplication(&Net);
do {
Error = 0;
Stop = TRUE;
for (n=0; n<NUM_DATA; n++) {
SimulateNet(&Net, Input[n], Output[n], FALSE, FALSE);
Error = MAX(Error, Net.Error);
Stop = Stop AND (Net.Error < Net.Epsilon);
}
Error = MAX(Error, Net.Epsilon);
printf("Training %0.0f%% completed ...\n", (Net.Epsilon / Error) * 100);
if (NOT Stop) {
for (m=0; m<10*NUM_DATA; m++) {
n = RandomEqualINT(0, NUM_DATA-1);
SimulateNet(&Net, Input[n], Output[n], TRUE, FALSE);
}
}
} while (NOT Stop);
FinalizeApplication(&Net);
}
OOO
O O
O O
O O
O O
O O
OOO -> 0
O
OO
OO
O
O
O
O -> 1
OOO
O O
O
O
O
O
OOOOO -> 2
OOO
O O
O
OOO
O
O O
OOO -> 3
O
OO
OO
O O
OOOOO
O
O -> 4
OOOOO
O
O
OOOO
O
O O
OOO -> 5
OOO
O O
O
OOOO
O O
O O
OOO -> 6
OOOOO
O
O
O
O
O
O -> 7
OOO
O O
O O
OOO
O O
O O
OOO -> 8
OOO
O O
O O
OOOO
O
O O
OOO -> 9
/******************************************************************************
====================================================
Network: Backpropagation Network with Bias Terms and Momentum
====================================================
******************************************************************************/
/******************************************************************************
DECLARATIONS
******************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#define FALSE 0
#define TRUE 1
#define NOT !
#define AND &&
#define OR ||
#define LO 0.1
#define HI 0.9
#define BIAS 1
void InitializeRandoms()
{
srand(4711);
}
/******************************************************************************
APPLICATION-SPECIFIC CODE
******************************************************************************/
#define NUM_LAYERS 3
#define N 30
#define M 1
INT Units[NUM_LAYERS] = {N, 10, M};
REAL Sunspots_[NUM_YEARS];
REAL Sunspots [NUM_YEARS] = { /* yearly sunspot values elided in this listing */ };
REAL Mean;
REAL TrainError;
REAL TrainErrorPredictingMean;
REAL TestError;
REAL TestErrorPredictingMean;
FILE* f;
void NormalizeSunspots()
{
INT Year;
REAL Min, Max;
Min = MAX_REAL;
Max = MIN_REAL;
for (Year=0; Year<NUM_YEARS; Year++) {
Min = MIN(Min, Sunspots[Year]);
Max = MAX(Max, Sunspots[Year]);
}
Mean = 0;
for (Year=0; Year<NUM_YEARS; Year++) {
Sunspots_[Year] =
Sunspots [Year] = ((Sunspots[Year]-Min) / (Max-Min)) * (HI-LO) + LO;
Mean += Sunspots[Year] / NUM_YEARS;
}
}
void InitializeApplication(NET* Net)
{
INT Year, i;
REAL Out, Err;
Net->Alpha = 0.5;
Net->Eta = 0.05;
Net->Gain = 1;
NormalizeSunspots();
TrainErrorPredictingMean = 0;
for (Year=TRAIN_LWB; Year<=TRAIN_UPB; Year++) {
for (i=0; i<M; i++) {
Out = Sunspots[Year+i];
Err = Mean - Out;
TrainErrorPredictingMean += 0.5 * sqr(Err);
}
}
TestErrorPredictingMean = 0;
for (Year=TEST_LWB; Year<=TEST_UPB; Year++) {
for (i=0; i<M; i++) {
Out = Sunspots[Year+i];
Err = Mean - Out;
TestErrorPredictingMean += 0.5 * sqr(Err);
}
}
f = fopen("BPN.txt", "w");
}
/******************************************************************************
INITIALIZATION
******************************************************************************/
void GenerateNetwork(NET* Net)
{
INT l, i;
Net->Layer = (LAYER**) calloc(NUM_LAYERS, sizeof(LAYER*));
for (l=0; l<NUM_LAYERS; l++) {
Net->Layer[l] = (LAYER*) malloc(sizeof(LAYER));
Net->Layer[l]->Units = Units[l];
Net->Layer[l]->Output = (REAL*) calloc(Units[l]+1, sizeof(REAL));
Net->Layer[l]->Error = (REAL*) calloc(Units[l]+1, sizeof(REAL));
Net->Layer[l]->Weight = (REAL**) calloc(Units[l]+1, sizeof(REAL*));
Net->Layer[l]->WeightSave = (REAL**) calloc(Units[l]+1, sizeof(REAL*));
Net->Layer[l]->dWeight = (REAL**) calloc(Units[l]+1, sizeof(REAL*));
Net->Layer[l]->Output[0] = BIAS;
if (l != 0) {
for (i=1; i<=Units[l]; i++) {
Net->Layer[l]->Weight[i] = (REAL*) calloc(Units[l-1]+1,
sizeof(REAL));
Net->Layer[l]->WeightSave[i] = (REAL*) calloc(Units[l-1]+1,
sizeof(REAL));
Net->Layer[l]->dWeight[i] = (REAL*) calloc(Units[l-1]+1,
sizeof(REAL));
}
}
}
Net->InputLayer = Net->Layer[0];
Net->OutputLayer = Net->Layer[NUM_LAYERS - 1];
Net->Alpha = 0.9;
Net->Eta = 0.25;
Net->Gain = 1;
}
/******************************************************************************
SUPPORT FOR STOPPED TRAINING
******************************************************************************/
/******************************************************************************
BACKPROPAGATING ERRORS
******************************************************************************/
void ComputeOutputError(NET* Net, REAL* Target)
{
INT i;
REAL Out, Err;
Net->Error = 0;
for (i=1; i<=Net->OutputLayer->Units; i++) {
Out = Net->OutputLayer->Output[i];
Err = Target[i-1]-Out;
Net->OutputLayer->Error[i] = Net->Gain * Out * (1-Out) * Err;
Net->Error += 0.5 * sqr(Err);
}
}
/******************************************************************************
SIMULATING THE NET
******************************************************************************/
void SimulateNet(NET* Net, REAL* Input, REAL* Output, REAL* Target, BOOL Training)
{
SetInput(Net, Input);
PropagateNet(Net);
GetOutput(Net, Output);
ComputeOutputError(Net, Target);
if (Training) {
BackpropagateNet(Net);
AdjustWeights(Net);
}
}
void TestNet(NET* Net)
{
INT Year;
REAL Output[M];
TrainError = 0;
for (Year=TRAIN_LWB; Year<=TRAIN_UPB; Year++) {
SimulateNet(Net, &(Sunspots[Year-N]), Output, &(Sunspots[Year]), FALSE);
TrainError += Net->Error;
}
TestError = 0;
for (Year=TEST_LWB; Year<=TEST_UPB; Year++) {
SimulateNet(Net, &(Sunspots[Year-N]), Output, &(Sunspots[Year]), FALSE);
TestError += Net->Error;
}
fprintf(f, "\nNMSE is %0.3f on Training Set and %0.3f on Test Set",
TrainError / TrainErrorPredictingMean,
TestError / TestErrorPredictingMean);
}
fprintf(f, "\n\n\n");
fprintf(f, "Year Sunspots Open-Loop Prediction Closed-Loop
Prediction\n");
fprintf(f, "\n");
for (Year=EVAL_LWB; Year<=EVAL_UPB; Year++) {
SimulateNet(Net, &(Sunspots [Year-N]), Output, &(Sunspots [Year]), FALSE);
SimulateNet(Net, &(Sunspots_[Year-N]), Output_, &(Sunspots_[Year]), FALSE);
Sunspots_[Year] = Output_[0];
fprintf(f, "%d %0.3f %0.3f
%0.3f\n",
FIRST_YEAR + Year,
Sunspots[Year],
Output [0],
Output_[0]);
}
}
/******************************************************************************
MAIN
******************************************************************************/
void main()
{
NET Net;
BOOL Stop;
REAL MinTestError;
InitializeRandoms();
GenerateNetwork(&Net);
RandomWeights(&Net);
InitializeApplication(&Net);
Stop = FALSE;
MinTestError = MAX_REAL;
do {
TrainNet(&Net, 10);
TestNet(&Net);
if (TestError < MinTestError) {
fprintf(f, " - saving Weights ...");
MinTestError = TestError;
SaveWeights(&Net);
}
else if (TestError > 1.2 * MinTestError) {
fprintf(f, " - stopping Training and restoring Weights ...");
Stop = TRUE;
RestoreWeights(&Net);
}
} while (NOT Stop);
TestNet(&Net);
EvaluateNet(&Net);
FinalizeApplication(&Net);
}
/******************************************************************************
==============
Network: Hopfield Model
==============
******************************************************************************/
/******************************************************************************
DECLARATIONS
******************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#define FALSE 0
#define TRUE 1
#define NOT !
#define AND &&
#define OR ||
#define LO -1
#define HI +1
/******************************************************************************
RANDOMS DRAWN FROM DISTRIBUTIONS
******************************************************************************/
void InitializeRandoms()
{
srand(4711);
}
/******************************************************************************
APPLICATION-SPECIFIC CODE
******************************************************************************/
#define NUM_DATA 5
#define X 10
#define Y 10
#define N (X * Y)
CHAR Pattern[NUM_DATA][Y][X] = { { "O O O O O ",
" O O O O O",
"O O O O O ",
" O O O O O",
"O O O O O ",
" O O O O O",
"O O O O O ",
" O O O O O",
"O O O O O ",
" O O O O O" },
{ "OO OO OO",
"OO OO OO",
" OO OO ",
" OO OO ",
"OO OO OO",
"OO OO OO",
" OO OO ",
" OO OO ",
"OO OO OO",
"OO OO OO" },
{ "OOOOO ",
"OOOOO ",
"OOOOO ",
"OOOOO ",
"OOOOO ",
" OOOOO",
" OOOOO",
" OOOOO",
" OOOOO",
" OOOOO" },
{ "O O O O",
" O O O ",
" O O O ",
"O O O O",
" O O O ",
" O O O ",
"O O O O",
" O O O ",
" O O O ",
"O O O O" },
{ "OOOOOOOOOO",
"O O",
"O OOOOOO O",
"O O O O",
"O O OO O O",
"O O OO O O",
"O O O O",
"O OOOOOO O",
"O O",
"OOOOOOOOOO" } };
{ "OOO O O",
" O OOO OO",
" O O OO O",
" OOO O ",
"OO O OOO",
" O OOO O",
"O OO O O",
" O OOO ",
"OO OOO O ",
" O O OOO" },
{ "OOOOO ",
"O O OOO ",
"O O OOO ",
"O O OOO ",
"OOOOO ",
" OOOOO",
" OOO O O",
" OOO O O",
" OOO O O",
" OOOOO" },
{ "OOOOOOOOOO",
"O O",
"O O",
"O O",
"O OO O",
"O OO O",
"O O",
"O O",
"O O",
"OOOOOOOOOO" } };
FILE* f;
/******************************************************************************
INITIALIZATION
******************************************************************************/
void GenerateNetwork(NET* Net)
{
INT i;
Net->Units = N;
Net->Output = (INT*) calloc(N, sizeof(INT));
Net->Threshold = (INT*) calloc(N, sizeof(INT));
Net->Weight = (INT**) calloc(N, sizeof(INT*));
for (i=0; i<N; i++) {
Net->Threshold[i] = 0;
Net->Weight[i] = (INT*) calloc(N, sizeof(INT));
}
}
/******************************************************************************
PROPAGATING SIGNALS
******************************************************************************/
BOOL PropagateUnit(NET* Net, INT i)
{
INT j;
INT Sum, Out;
BOOL Changed;
Changed = FALSE;
Sum = 0;
Sum = 0;
for (j=0; j<Net->Units; j++) {
Sum += Net->Weight[i][j] * Net->Output[j];
}
if (Sum != Net->Threshold[i]) {
if (Sum < Net->Threshold[i]) Out = LO;
if (Sum > Net->Threshold[i]) Out = HI;
if (Out != Net->Output[i]) {
Changed = TRUE;
Net->Output[i] = Out;
}
}
return Changed;
}
void PropagateNet(NET* Net)
{
INT Iteration, IterationOfLastChange;
Iteration = 0;
IterationOfLastChange = 0;
do {
Iteration++;
if (PropagateUnit(Net, RandomEqualINT(0, Net->Units-1)))
IterationOfLastChange = Iteration;
} while (Iteration-IterationOfLastChange < 10*Net->Units);
}
/******************************************************************************
SIMULATING THE NET
******************************************************************************/
void SimulateNet(NET* Net, INT* Input, INT* Output)
{
SetInput(Net, Input);
PropagateNet(Net);
GetOutput(Net, Output);
}
/******************************************************************************
MAIN
******************************************************************************/
void main()
{
NET Net;
INT n;
InitializeRandoms();
GenerateNetwork(&Net);
InitializeApplication(&Net);
CalculateWeights(&Net);
FinalizeApplication(&Net);
}
OOOOO OOOOO
OOOOO OOOOO
OOOOO OOOOO
OOOOO OOOOO
OOOOO OOOOO
OOOOO OOOOO
OOOOO OOOOO
OOOOO OOOOO
OOOOO OOOOO
O O O O O -> O O O O O
OO OO OO OO OO OO
OO OO OO OO OO OO
OO OO OO OO
OO OO OO OO
OO OO OO OO OO OO
OO OO OO OO OO OO
OO OO OO OO
OO OO OO OO
OO OO OO OO OO OO
OO OO OO -> OO OO OO
OOOOO OOOOO
OOOOO OOOOO
OOOOO OOOOO
OOOOO OOOOO
OOOOO OOOOO
OOOOO OOOOO
OOOOO OOOOO
OOOOO OOOOO
OOOOO OOOOO
OOOOO -> OOOOO
O O O O O O O O
O O O O O O
O O O O O O
O O O O O O O O
O O O O O O
O O O O O O
O O O O O O O O
O O O O O O
O O O O O O
O O O O -> O O O O
OOOOOOOOOO OOOOOOOOOO
O O O O
O OOOOOO O O OOOOOO O
OO OO OO OO
O O OO O O O O OO O O
O O OO O O O O OO O O
OO OO OO OO
O OOOOOO O O OOOOOO O
O O O O
OOOOOOOOOO -> OOOOOOOOOO
OOOOO
OOOOO
OOOOO
OOOOO
OOOOO
OOOOO OOOOO
OOOOO OOOOO
OOOOO OOOOO
OOOOO OOOOO
O O O O O -> O O O O O
OOO O O OO OO OO
O OOO OO OO OO OO
O O OO O OO OO
OOO O OO OO
OO O OOO OO OO OO
O OOO O OO OO OO
O OO O O OO OO
O OOO OO OO
OO OOO O OO OO OO
O O OOO -> OO OO OO
OOOOO OOOOO
O O OOO OOOOO
O O OOO OOOOO
O O OOO OOOOO
OOOOO OOOOO
OOOOO OOOOO
OOO O O OOOOO
OOO O O OOOOO
OOO O O OOOOO
OOOOO -> OOOOO
O OOOO O O O O O
OO OOOO O O O
OOO OOOO O O O
OOOO OOOO O O O O
OOOO OOO O O O
OOOO OO O O O
O OOOO O O O O O
OO OOOO O O O
OOO OOOO O O O
OOOO OOOO -> O O O O
OOOOOOOOOO OOOOOOOOOO
O O O O
O O O OOOOOO O
O O OO OO
O OO O O O OO O O
O OO O O O OO O O
O O OO OO
O O O OOOOOO O
O O O O
OOOOOOOOOO -> OOOOOOOOOO
NETtalk is an artificial neural network. It is the result of research carried out in the
mid-1980s by Terrence Sejnowski and Charles Rosenberg. The intent behind NETtalk
was to construct a simplified model that might shed light on the complexity of
learning human-level cognitive tasks, and to implement it as a connectionist
model that could also learn to perform a comparable task.
NETtalk is a program that learns to pronounce written English text by being shown
text as input and matching audio for comparison.
To those who do not rigorously study neural networks and their limitations, it would
appear to be artificial intelligence in the truest sense of the term. Some misinformed
authors have claimed in print that NETtalk learns to read at the level of a
four-year-old child in about 16 hours. Such a claim, while not an outright lie,
is an example of misunderstanding what human brains do when they read, and what
NETtalk is capable of learning. Being able to read and pronounce text is not the same
as actually comprehending what is being read and understanding in terms of actual
imagery and knowledge representation, and this is a key difference between a human
child learning to read and an experimental neural network such as NETtalk. In other
words, being able to pronounce "grandmother" is not the same as knowing who or
what a grandmother is, and how she relates to your immediate family, or what she
looks like. NETtalk does not specifically address human-level knowledge
representation or its complexities.