Solving some warnings

Hey guys

I'd just like someone to check my code. I think there is some leakage of data, and I have a lot of warnings, but I don't know how to fix them, except that when it says conversion of int to float I just wrap the value in float(..).

Here it is; it's quite long.

float _angle = -70.0f;
vector<Point3f> vertexpoints;
float lenx=0.0,leny=0.0,lenz=0.0;
const double camlen=480.0;
const double camwidth=640.0;

//Draws the 3D scene
void drawScene() {
    //Clear information from last draw
	float vx=0.0,vy=0.0,vz=0.0,size=0.0;
	float vertsize=float(vertexpoints.size());
	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
	
	glMatrixMode(GL_MODELVIEW);
	glLoadIdentity();
	
	glTranslatef(0.0f, 0.0f, -8.0f);
		
	glRotatef(_angle, 0.0f, 1.0f, 0.0f);	//rotate by 70 degree anticlockwise

	glColor3f(1.0f, 1.0f, 0.0f);	//set color of shape orangey
	
	glBegin(GL_POINTS); //Begin points coordinates		
		for(float v=0.0;v<float(vertsize);v++)
		{
			vx=vertexpoints[v].x;
			vy=vertexpoints[v].y;
			vz=vertexpoints[v].z;
			glVertex3f((-1.5+(3.0*(vx/lenx))),(-1.0+2.0*(vy/leny)),(-1.5+(3.0*(vz/lenz))));
			//idea behind this is to limit the size between -1.5 to 1.5 for x and z direction and -1 to 1 for y direction
		}
    glEnd(); //End points coordinates
	
	//glPopMatrix(); // restore transform state	dont need it as only shape being drawn
	
	glutSwapBuffers(); //Send the 3D scene to the screen
}

bool PtInRect(float x, float y)
{
	if((x<camlen)&&(x>0)&&(y<camwidth)&&(y>0))
	{
		return true;
	}
	else return false;
}
							
int main(int argc, char** argv)
{
	//glut initializing function here

	Mat image, grayscale;
	Mat Rot, Tran;
	int numcorners;
	
	Mat icovar;
	Scalar meanmat;
	double covar[3][3]={{35.2189, 146.3495, 105.9640},{146.3495,801.1402,527.6974},{105.9640,527.6974,553.3654}};
	meanmat[0]=15.5662;
	meanmat[1]=118.3597;
	meanmat[2]=48.5153;

	Mat covmat(3,3,CV_64F,covar);
	
	Mat mask = Mat::zeros(camlen, camwidth, CV_8UC1);	//create matrix same size as image which is 480 by 640 based on the webcam capture
	icovar=inversemat(covmat);		//determinant of covariance matrix is zero. SOLVED
	
	double distance = 250;
	int elemsize=3;
	
	Mat element = getStructuringElement(0, Size( 2*elemsize + 1, 2*elemsize+1 ), Point( elemsize, elemsize ) );
	
	Mat corners;
	printf("Enter number of corners to detect (must be greater than 4) e.g 5: ");
    scanf("%d", &numcorners);
		
	vector<vector<Point3f>> object_points;
    vector<vector<Point2f>> image_points;

	vector<Point3f> obj;
	vector<Point2f> img;
    
	vector<Point3f> threedpoint;
	vector<Point2f> projectedpoints;
	
	Mat intrinsic = Mat(3, 3, CV_32FC1);
	Mat distCoeffs;
	vector<Mat> rvecs;
	vector<Mat> tvecs;
	
	intrinsic.ptr<float>(0)[0] = 1;
	intrinsic.ptr<float>(1)[1] = 1;
	int check=0;
	Mat silhouette;
	int objtemp=0;
	float xmax=580,xmin=80,ymin=190;
	VideoCapture webcam;
	webcam.open(-1);	
	
	//distance for note purpose
	//rectangle horizontally dot to dot 2620 vertically 1750mm
	//square horizontally dot to do 1733 vertically 1750mm
	printf("Enter the distance between the two marked corners in the x direction (mm): ");
	scanf("%f",&lenx);
	printf("---------------------------------------------------------------------\n");
	printf("NOTE:Make sure you Adjust the parameters of the camera i.e. brightness\nto the required amount.\n");
	printf("---------------------------------------------------------------------\n");
	printf("Enter the distance between the two marked corners in the y direction (mm): ");
	scanf("%f",&leny);
	printf("Enter the height of the object with extra 1cm for space (mm): ");
	scanf("%f",&lenz);

	int sz[] = {lenx,leny,lenz};
	Mat threedimension(3,sz,CV_32F,Scalar::all(1));  //create 3dim matrix, type 32 filled with 1s.                                            
													//same with point2f
	if(!webcam.isOpened())
	{
		cout<<"\nThe Camera is being used by another application, make sure all applications using the camera are closed and try running this program again."<<endl;
		system("PAUSE");
		return 0;
	}

	glutCreateWindow("Temporary Visual of 3D Model");
	initRendering();
	while(1)
	{
		//copy webcam stream to image
		webcam>>image;
		#pragma omp parallel sections
		{
			#pragma omp section
			{silhouette=imagesegmentation(image,icovar,meanmat,distance,mask,element); }
			#pragma omp section
			{corners=Cornerdetect(image,grayscale,corners,numcorners);}
		}

		if(corners.rows>4)
		{
			//#pragma omp parallel for
			for(int i=(corners.rows-4);i<corners.rows;i++)
			{
				if((corners.at<float>(i,0)>xmin)&&(corners.at<float>(i,1)>ymin)&&(corners.at<float>(i,0)<xmax))
				{
					//draws circle on image, at centre at point, color, thickness, line type, 
					circle(image,corners.at<Point2f>(i),3,CV_RGB(255,0,0),1,8,0);
					obj.push_back(Point3f(float(objtemp/2), float(objtemp%2), 0.0f));		//setting up the units of calibration
					img.push_back(corners.at<Point2f>(i));		
					objtemp++;
				}
			}
			if(objtemp==4)
			{
				image_points.push_back(img);
				object_points.push_back(obj);
				calibrateCamera(object_points, image_points, image.size(), intrinsic, distCoeffs, rvecs, tvecs); 
				Rot=rvecs[0];
				Tran=tvecs[0];
				cout<<threedimension.at<int>(1,1,2); //the value is 65231512 something like this for a lot of them
				//#pragma omp parallel for	//unhandled exception error
				for(int l=0;l<int(lenx);l++)
				{
					for(int w=0;w<int(leny);w++)
					{
						for(int h=0;h<int(lenz);h++)
						{
							threedpoint.push_back(Point3f(l,w,h));		
							projectPoints(threedpoint,Rot,Tran,intrinsic,distCoeffs,projectedpoints);
							cout<<"X= "<<projectedpoints[0].x<<" Y= "<<projectedpoints[0].y<<endl;
							if(PtInRect(projectedpoints[0].x,projectedpoints[0].y)==1)
							{
								//access each element of threedimension.at<float>(x,y,z)
								check = threedimension.at<int>(l,w,h);
								if(check==1)   //-1 rows and -1 cols check into it, solved cannot check the matrix directly so assign it to a variable then check
								{
									vertexpoints.push_back(Point3f(l,w,h));
									if(int(mask.at<uchar>(projectedpoints[0]))==255)
									{
										threedimension.at<float>(l,w,h)=0;
									}
							
								}
							}
							projectedpoints.clear();
							threedpoint.clear();
						}
					}
				}
				imshow("original", image);
				waitKey(30);
				imshow("mask",mask);
				waitKey(30);			//this is to give the processor some time to display the image
				//rendering over here with the displays, loop through each points which is a one to get a list of vectors (vertexpoints) then draw using those points by looping through it
				//after rendering clear it so it doesn't stack on top		
				glutKeyboardFunc(handleKeypress);
				glutReshapeFunc(handleResize);
				glutDisplayFunc(drawScene);
				glutTimerFunc(25, Update, 0); //call update function every 25ms.decrease and it will call it quicker and will rotate faster
				glutMainLoop();
			}
		}
		objtemp=0;
		img.clear();
		obj.clear();
		image_points.clear();
		object_points.clear();
	}
	cout<<"it looks like you have finished rendering and now it will display the final 3d model"<<endl;
	// final function for marching cubes algorithm most likely a for loop so would use #pragma omp parallel for

	webcam.release();
	system("PAUSE");
	return 0;
}


threedimension is a 3D array filled with ones, but for some reason near the end it is filled with a very large number.

There are a lot of warnings about conversion from int to float and from float to unsigned int:


c:\opencv2.4.3\build\include\opencv2\flann\logger.h(66): warning C4996: 'fopen': This function or variable may be unsafe. Consider using fopen_s instead. To disable deprecation, use _CRT_SECURE_NO_WARNINGS. See online help for details.
1> c:\program files\microsoft visual studio 10.0\vc\include\stdio.h(234) : see declaration of 'fopen'
1>c:\users\safiyullaah\documents\visual studio 2010\projects\3d image reconstruction\3d image reconstruction\main.cpp(98): warning C4244: 'argument' : conversion from 'float' to 'unsigned int', possible loss of data
1>c:\users\safiyullaah\documents\visual studio 2010\projects\3d image reconstruction\3d image reconstruction\main.cpp(99): warning C4244: 'argument' : conversion from 'float' to 'unsigned int', possible loss of data
1>c:\users\safiyullaah\documents\visual studio 2010\projects\3d image reconstruction\3d image reconstruction\main.cpp(100): warning C4244: 'argument' : conversion from 'float' to 'unsigned int', possible loss of data


I would appreciate someone's help to correct any mistakes I have made in the program, which I think may be the cause of the threedimension and mask matrices and the projectPoints function not working properly.

thanks
It's simple. The warnings are telling you exactly what to do.

Replace fopen with fopen_s

You're using unsigned ints instead of floats, so you may lose floating-point data; 1.15 stored in an int will yield 1.

So just use floats.
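For instance, initialising an int from a floating-point value simply drops the fractional part:

int   i = 1.15f;   // i becomes 1 - and the compiler warns about possible loss of data
float f = 1.15f;   // f keeps the fractional part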
Hi asda333,

It is good to see this code in the context of your other topic.

Some other things:

You have too much code in main() (nearly 200 LOC) - break it up into functions; it will make life much easier.

The things I mentioned in the other topic should help with your problems.

Although the line numbers in the errors don't seem to match your code.

Also just wondering what compiler warning options you have set? It is a good idea to put them at their maximum.
It says to set it to float, but the error still occurs, e.g. v=vertexpoints[k].x. I changed it to float(vertexpoints[k].x), but it still complains about unsigned ints, and I don't use unsigned ints.

How do I set the compiler warning options to max?

Most of the code in my program is variables.
I also noticed line 24 - never use floats in a for loop like that. Floats are stored as binary fractions, so doing this will cause you some drama.

It would be good if you could use cin & cout rather than scanf & printf, seeing as you are doing C++. If using scanf, always check the return value to see how well it worked, otherwise you could easily have garbage values.
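For example, something like this (just a sketch, reusing the lenx prompt from your code):

printf("Enter the distance between the two marked corners in the x direction (mm): ");
if (scanf("%f", &lenx) != 1)   // scanf returns the number of items successfully converted
{
    printf("That wasn't a valid number.\n");
    return 0;
}

// or the C++ way:
std::cout << "Enter the distance between the two marked corners in the x direction (mm): ";
if (!(std::cin >> lenx))       // the stream evaluates to false if the read failed
{
    std::cout << "That wasn't a valid number.\n";
    return 0;
}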

With std::cout, you don't have to have it all on one line like you have on line 118; you can build it up in stages - put in a std::endl or '\n' where required. I am also not a big fan of really long comments on one line.

Line 142 looks like another place to use PtInRect.

Global variables are not such a good idea, and I am not a fan of multiple declarations on one line. You have camlen & camwidth as doubles, but everywhere else you have floats. You use these in the PtInRect function but the arguments are floats, so you have unnecessary implicit casting. In general you have a lot of casting going on everywhere, see if you can cut it down a bit.
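For example, if those two constants were floats like everything else, PtInRect needs no implicit conversions at all (a sketch only - and note that if the mask is 480 rows by 640 columns, x is the column, so it should probably be tested against the width and y against the height):

const float camlen   = 480.0f;   // image height (rows)
const float camwidth = 640.0f;   // image width (columns)

bool PtInRect(float x, float y)
{
    // everything is float now, so no double<->float warnings
    return x > 0.0f && x < camwidth && y > 0.0f && y < camlen;
}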

With comments, I quite often write what I want to do as comments, then go back & write the code. It is not a bad way of organising your thoughts, serves as a bit of documentation and makes it easier for others to understand. As well as that choose meaningful variable names - they can make it fairly obvious what is going on by themselves.

Edit: Plus splitting into functions which have meaningful names helps also.

Hope this helps.
cout<<threedimension.at<int>(1,1,2); //the value is 65231512 something like this for a lot of them

But threedimension is a matrix of floats. I am guessing the at<int> isn't doing a cast; rather, the template argument should reflect the underlying type, which is float. So try this:

unsigned TheValue = static_cast<unsigned>( threedimension.at<float>(1,1,2) );
std::cout << TheValue<< "\n";


I used a C++-style cast, as opposed to a C-style cast. Is the same thing happening on line 175 with the uchar?

It says to set it to float, but the error still occurs, e.g. v=vertexpoints[k].x. I changed it to float(vertexpoints[k].x), but it still complains about unsigned ints, and I don't use unsigned ints.


I don't see where you refer to vertexpoints in your code like this, all you do is push into it.

How do I set the compiler warning options to max?


I don't use VS, but it can't be that hard.

Most of the code in my program is variables.


Was that in reply to my suggestion of breaking the code up into functions? If so, the bodies of loops are good candidates for putting into a function, just send the function whatever variables it needs.

Why does a camera image need a 3D array? Isn't it a 2D array of colours?


Can you show your errors as they came from the compiler exactly?
Continuing on from the other topic:

Still confused about the 3D array (lines 160-162 of your code):

for(int l=0;l<int(lenx);l++) {
	for(int w=0;w<int(leny);w++) {
		for(int h=0;h<int(lenz);h++) {}


If the dimensions of this were all 10, there are 1000 values. With an image, wouldn't there only be 100 values? How can a camera image have a height as well as x,y? I am missing the whole point I think.

The loop is actually just generating the 3D points to use for the projectPoints function, so if I have dimensions of x=10, y=10, z=10, I want to generate the 3D points 0,0,0, 0,0,1, all the way up to 9,9,9.


This is 1000 values, which is different to 100 xyz values. This is what confuses me the most.

To compare equality for FP numbers, you have to take the absolute value of the difference between the 2 numbers, and see if that is less than a predetermined precision value (0.001 say):

const float PRECISION = 0.001f;

float a = 0.1f; // not exactly 0.1 in binary
float b = 10.0f * a; // so b may not be exactly 1.0
float c = 1.0f;

if (std::abs (c - b) < PRECISION ) {
     std::cout << "numbers c & b equal" << "\n";
}
else {
     std::cout << "numbers c & b NOT equal" << "\n";
}
First, here are the warnings I get:

1>c:\opencv2.4.3\build\include\opencv2\flann\logger.h(66): warning C4996: 'fopen': This function or variable may be unsafe. Consider using fopen_s instead. To disable deprecation, use _CRT_SECURE_NO_WARNINGS. See online help for details.
1> c:\program files\microsoft visual studio 10.0\vc\include\stdio.h(234) : see declaration of 'fopen'

3d image reconstruction\main.cpp(103): warning C4244: 'argument' : conversion from 'double' to 'GLfloat', possible loss of data

3d image reconstruction\main.cpp(103): warning C4244: 'argument' : conversion from 'double' to 'GLfloat', possible loss of data

3d image reconstruction\main.cpp(103): warning C4244: 'argument' : conversion from 'double' to 'GLfloat', possible loss of data

3d image reconstruction\main.cpp(153): warning C4244: 'argument' : conversion from 'const float' to 'int', possible loss of data

3d image reconstruction\main.cpp(153): warning C4244: 'argument' : conversion from 'const float' to 'int', possible loss of data

3d image reconstruction\main.cpp(201): warning C4244: 'initializing' : conversion from 'float' to 'int', possible loss of data

3d image reconstruction\main.cpp(201): warning C4244: 'initializing' : conversion from 'float' to 'int', possible loss of data

3d image reconstruction\main.cpp(201): warning C4244: 'initializing' : conversion from 'float' to 'int', possible loss of data

3d image reconstruction\main.cpp(268): warning C4244: 'argument' : conversion from 'int' to 'float', possible loss of data

3d image reconstruction\main.cpp(268): warning C4244: 'argument' : conversion from 'int' to 'float', possible loss of data

3d image reconstruction\main.cpp(268): warning C4244: 'argument' : conversion from 'int' to 'float', possible loss of data

3d image reconstruction\main.cpp(273): warning C4018: '<' : signed/unsigned mismatch

3d image reconstruction\main.cpp(279): warning C4244: 'argument' : conversion from 'float' to 'int', possible loss of data

3d image reconstruction\main.cpp(279): warning C4244: 'argument' : conversion from 'float' to 'int', possible loss of data

3d image reconstruction\main.cpp(279): warning C4244: 'argument' : conversion from 'float' to 'int', possible loss of data

3d image reconstruction\main.cpp(286): warning C4244: 'argument' : conversion from 'float' to 'int', possible loss of data

3d image reconstruction\main.cpp(286): warning C4244: 'argument' : conversion from 'float' to 'int', possible loss of data

3d image reconstruction\main.cpp(286): warning C4244: 'argument' : conversion from 'float' to 'int', possible loss of data
And to your question:

Continuing on from the other topic:

Still confused about the 3d array:

for(int l=0;l<int(lenx);l++) {
for(int w=0;w<int(leny);w++) {
for(int h=0;h<int(lenz);h++) {}


If the dimensions of this were all 10, there are 1000 values. With an image, wouldn't there only be 100 values? How can a camera image have a height as well as x,y? I am missing the whole point I think.


There is a 3D array which contains 1s and 0s; this loop generates the coordinates of all the elements of that 3D array. A 1 represents a texture and a 0 represents a hole, and I will be rendering the parts that contain a 1 to produce a 3D model.

The only camera image there is the mask, which I do not change.

The loop is actually just generating the 3D points to use for the projectPoints function, so if I have dimensions of x=10, y=10, z=10, I want to generate the 3D points 0,0,0, 0,0,1, all the way up to 9,9,9.

Exactly, and it will give me the 2D points. Previously I calibrated the camera with the 4 corners of a sheet with the range 0,0,0, 0,9,0, 9,0,0 and 9,9,0, so anything in between will give me 2D points between the corners.

This is 1000 values, which is different to 100 xyz values. This is what confuses me the most.

Exactly; as we have a 3D array, the volume is bound to be 1000 elements, not 100 x,y,z values. The length, height and width might each be around a hundred, but not the volume.

To compare equality for FP numbers, you have to take the absolute value of the difference between the 2 numbers, and see if that is less than a predetermined precision value (0.001 say):

const float PRECISION = 0.001f;

float a = 0.1f; // not exactly 0.1 in binary
float b = 10.0f * a; // so b may not be exactly 1.0
float c = 1.0f;

if (std::abs (c - b) < PRECISION ) {
std::cout << "numbers c & b equal" << "\n";
}
else {
std::cout << "numbers c & b NOT equal" << "\n";
}


So you have to compare the difference; you can't compare the actual values. Man, that is the long way round.

I also changed my code according to what you wrote in your first two posts.

It's starting to get there.

Also, I use global variables so they can be accessed by external functions, which are prebuilt to take no arguments and return void.
Also, if the array is 173x175x140, the total number of elements in the array would be 4,238,500.

So how should I change

if(int(mask.at<uchar>(projectedpoints[index]))==255)
{
    threedimension.at<float>(dx,dy,dz)=0.0;
}

as I think it crashes because the number is too big and the precision of mask is too small.

Should I change it to float(mask.at<uchar>(projectedpoints[index]))==255?

I want it to handle any large number.

What else should I use besides global variables?

I have a prebuilt function which detects a keypress, and the only way I can make anything happen in main from that function is by changing a global variable.
Crikey it is all too confusing to me.

The warnings you posted don't match the code you posted - Warnings mention line 286, but there is only 213 LOC in main(). Could you update the code?

I did show a way of comparing equality with floats, but you might be better with the static_cast I mentioned earlier - did you try that?

There is a 3D array which contains 1s and 0s; this loop generates the coordinates of all the elements of that 3D array. A 1 represents a texture and a 0 represents a hole, and I will be rendering the parts that contain a 1 to produce a 3D model.


I still think your concept of 3D points is not right. A 3D array is not the right container for 3D points. As you said before, the projectPoints function only needs the coordinates, but now you are talking about a texture as well. Even if you wanted to store a texture along with its 3D point, I still wouldn't use a 3D array.

I am an Engineering Surveyor with 27 years' experience: I deal with 3D coords every day, and I have been doing bits & pieces of programming for about the same amount of time. I would store 3D points as a vector of structs containing the x,y,z values. To help compare to your example: if you knew there were 100 of them, in a [100][3] array. But you are saying that you have a space that is, say, 100 by 100 by 150, so you use a [100][100][150] array, as shown by this code:

Mat threedimension(3,sz,CV_32F,Scalar::all(1)); //create 3dim matrix, type 32 filled with 1s

Which is wrong for two reasons:

1. It uses vastly more memory than needed (150 times too much), even if each position is a tile, there is a stack of 150 tiles for each xy position, when you only need 1 tile per xyz 3d position.

2. If the coordinates are sparse - not a coord for each tile, then again it uses massively too much memory.

So it is best to have a vector of 3d points, with the ranges of the xyz matching the size of the space. You actually have exactly that for the threedpoint, as shown by this code:

threedpoint.push_back(Point3f(l,w,h));

BUT because it is inside 3 nested for loops, you create lenz times too much data. If lenz is 150, you have 150 times too much data - 150 z values for each xy value.

Here is some code that creates data for a 3D surface, where z = 2*x:

for(int l=0;l<int(lenx);l++) {
    for(int w=0;w<int(leny);w++) {
        threedpoint.push_back(Point3f(l,w,2*l));
    }
}


Now there is only 1 z value for each xy value.

So to store 3D points with a texture value for each one, can you make your own struct which has x, y, z, Texture in it? Then push instances of it into a vector.

Edit: Or a struct with Point3f, Texture in it.
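Something along these lines, perhaps (a sketch only - the struct and member names are just made up for illustration):

// one record per occupied voxel, instead of a value for every cell of a 3D array
struct VoxelPoint
{
    Point3f position;   // real-world x,y,z
    float   value;      // e.g. 1 = surface/texture, 0 = hole
};

vector<VoxelPoint> model;

VoxelPoint p;
p.position = Point3f(1.0f, 2.0f, 3.0f);
p.value    = 1.0f;
model.push_back(p);   // only the points you actually need are stored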

Now as I said last time, I think the creation of the points and the projection of them should be outside the loops.
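Something like this, maybe (a rough, untested sketch that reuses your variable names - lenx, leny, lenz, Rot, Tran, intrinsic, distCoeffs - and keeps your full grid of points):

// build all the 3D grid points once...
vector<Point3f> gridpoints;
for(int l=0;l<int(lenx);l++)
    for(int w=0;w<int(leny);w++)
        for(int h=0;h<int(lenz);h++)
            gridpoints.push_back(Point3f(float(l),float(w),float(h)));

// ...then project them all with ONE call to projectPoints,
// instead of calling it once per point inside the loops
vector<Point2f> projected;
projectPoints(gridpoints, Rot, Tran, intrinsic, distCoeffs, projected);

// projected[i] is the image position of gridpoints[i],
// so the silhouette test can then be done in a single flat loop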

After all this discussion, I found this example :

http://programmingexamples.net/wiki/OpenCV/WishList/ProjectPoints


Anyway, it is late (or early) at this end (02:45AM) so I am going to retire. Hope I have been some help.
The code is quite long, so I won't be able to post it all here; I can upload a text file.

I didn't try the static_cast, as I figured out the error: I just had to replace int with float and it worked.

I still think your concept of 3D points is not right. A 3D array is not the right container for 3D points. As you said before, the projectPoints function only needs the coordinates, but now you are talking about a texture as well. Even if you wanted to store a texture along with its 3D point, I still wouldn't use a 3D array.


Sorry about the confusion; you don't have to worry about the texture. The 3D array is filled with ones and is changed to zeros as explained previously.

There is not supposed to be one z value for each x,y value; there should be more, depending on the size of the object in the z direction.

Adding a struct would not work, because a 3D array better represents the model than a vector of points.

I got to solve the projectPoints problem: since I had calibrated the camera with coordinates (0,0,0), (0,1,0), (1,0,0), (1,1,0) and I was trying to project coordinates outside this limit, it was bound to give big and small results, so I calibrated it with (0,0,0), (0,leny,0), (lenx,0,0), (lenx,leny,0) and it worked.

Thanks for the help.

Can I send you the code so you can maybe look through it and make the necessary changes, and I will see if it performs as I wanted it to?

Is there any website where I can upload a text file?

Thanks
Hi asda333,

Well good to hear you have it working the way you want.

My apologies, if I am wasting your time explaining to me what you are trying to do, but some of the things just don't make sense to me.

Although from your description of why it didn't work before, could it possibly be solved using an appropriate value for the scale part of the transformation? Large real world -> scale factor -> small image plane.

Just wondering what you thought of the example code I found? It seems much simpler than yours - never mind that they hard coded the 3d points, it was much more like what I would have expected, and they didn't use any 3d arrays.

The following still doesn't seem right to me, for the two reasons I gave in my last post:

There is not supposed to be one z value for each x,y value; there should be more, depending on the size of the object in the z direction.

Adding a struct would not work, because a 3D array better represents the model than a vector of points.


Even if a 3D point represented an individual pixel, and there were multiple pixels per xy point, using a 3D array is still massively wasteful IMO, and I still don't understand why you would do this for relatively large real-world coordinates. I realise that it is the texture data that is in the 3D array, but it still has real-world 3D coords, so what I am saying applies to it.

It just seems like a recipe for a huge waste of memory.

To give an example from my work, I make 3d models all the time, so I can do contour plans and calculate volumes etc. The software I use certainly does not store them in 3d arrays, they are conceptually a vector of 3d points.

From reading the documentation, it seems the purpose of the projectpoints function is to transform the real world 3d coordinates onto a projection plane that could be used to make a display on the screen. So I can imagine a 3d model of a scene, which is made of 3d points to define surfaces, and you want to be able to move & rotate the "camera", to give the impression of moving around in a scene. The camera is a virtual one in that scenario, although you seem to be using a real one. I am not sure how that part works either. If the mask is going to hide points projected from the 3d model, wouldn't the camera image have to be made from the model?

Just wondering, did you fix up this part of your code (lines 24-32)?

for(float v=0.0;v<float(vertsize);v++)
		{
			vx=vertexpoints[v].x;
			vy=vertexpoints[v].y;
			vz=vertexpoints[v].z;
			glVertex3f((-1.5+(3.0*(vx/lenx))),(-1.0+2.0*(vy/leny)),(-1.5+(3.0*(vz/lenz))));
			//idea behind this is to limit the size between -1.5 to 1.5 for x and z direction and -1 to 1 for y direction
		}


vertexpoints doesn't have anything in it at the point where you assign its size to vertsize, and you are using floats in the for loop and floats for the subscript - and is there anything in vertexpoints by the time this runs?
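For what it is worth, that loop could look something like this (a sketch only, keeping your variable names):

// integer index instead of a float loop counter, and the size taken
// directly from the vector each time through
for(size_t v = 0; v < vertexpoints.size(); ++v)
{
    float vx = vertexpoints[v].x;
    float vy = vertexpoints[v].y;
    float vz = vertexpoints[v].z;
    // all-float arithmetic, so no double -> GLfloat warnings
    glVertex3f(-1.5f + 3.0f*(vx/lenx), -1.0f + 2.0f*(vy/leny), -1.5f + 3.0f*(vz/lenz));
}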

There are lots of sites where you can post code; dropbox & ideone come to mind. It would be good if you could use one that shows the text directly, rather than having to download some other file format.

One more thing: if you configure your IDE to make indenting 4 spaces rather than a tab, it will display much better on this page. This page makes tabs 8 spaces - that is why it seems to indent so much.
no worries


Although from your description of why it didn't work before, could it possibly be solved using an appropriate value for the scale part of the transformation? Large real world -> scale factor -> small image plane.


I don't understand what you mean. Are you referring to the scale parameter, or to the calibration process giving negative numbers from projectPoints? Yes, it could have been solved by multiplying by a scale in the code, but I think it would result in a smaller model.

I have seen that site before; it demonstrates the use of projectPoints. It does not create a bounding box representing the 3D model, it only demonstrates how to use the projectPoints function.


Even if a 3D point represented an individual pixel, and there were multiple pixels per xy point, using a 3D array is still massively wasteful IMO, and I still don't understand why you would do this for relatively large real-world coordinates. I realise that it is the texture data that is in the 3D array, but it still has real-world 3D coords, so what I am saying applies to it.

It just seems like a recipe for a huge waste of memory.


What do you suggest besides creating a vector of 3D points? I would like to build a dynamic 3D array.

Arrays make it easier to locate a certain element using 3D coordinates.

I'm not so good at creating programs which are memory friendly.

How would you do it?


To give an example from my work, I make 3d models all the time, so I can do contour plans and calculate volumes etc. The software I use certainly does not store them in 3d arrays, they are conceptually a vector of 3d points.


Interesting, so you have knowledge in this field.


The camera is a virtual one in that scenario, although you seem to be using a real one. I am not sure how that part works either. If the mask is going to hide points projected from the 3d model, wouldn't the camera image have to be made from the model?


The camera is used to capture an image of an object at a certain viewpoint, and when the camera is moved the parameters change, i.e. rotation and translation, and a new image of the object is captured at a different position. So the point is that the camera is not virtual; it is capturing the object at different viewpoints via a live feed from the camera.

Each image is converted to a mask distinguishing the foreground from the background. The mask does not hide the projected points; rather, each projected point is used as a location reference to identify whether it lies inside the silhouette or not, and the camera model is identified by its parameters. It's not a camera image but an image from the camera. I don't know what you mean by 'made from the model', but I'm guessing it's just a mistake, as nothing is made from the model; rather, parameters are calculated to help project the 3D points onto a 2D plane, the 2D plane being the mask.

I haven't fixed that part yet; I just removed the float from the loop initialisation variable and left the rest as it is.

vertexpoints contains the 3D coordinates whose element in the 3D array was a one and whose 2D equivalent was found to be inside the silhouette, so it is not empty.

Here is the code:
http://ideone.com/zs58Mq

By the way, do you know how I can use OpenGL to render a 3D model by rendering the surface only, e.g. if a voxel is on the edge of the model, push that point into vertexpoints?

I want to render it as a solid object, but whenever I render it, it displays either lines or rows of triangles; I want a way to render the object as a solid model.

thanks
If you can change it and get it to still behave like the original code, then I will accept it.
Hi,

I am not going to attempt to change your code: I don't really understand its overall objective, I would need to install OpenCV & other packages onto my system, and I have explained what I thought was strange. I also don't want to waste more of your time explaining your code.

However I did learn some stuff, so thanks.

Hope everything works out at your end. Cheers
Thanks for all the help.

I got it to work in the end, even though there is a lot of memory wastage, so I implemented some OpenMP.

I'm glad you also benefited from it.

Thanks