@@ -58,6 +58,34 @@ OpenCVTexture::
 ~OpenCVTexture() {
 }
 
+////////////////////////////////////////////////////////////////////
+//     Function: OpenCVTexture::consider_update
+//       Access: Protected, Virtual
+//  Description: Calls update_frame() if the current frame has
+//               changed.
+////////////////////////////////////////////////////////////////////
+void OpenCVTexture::
+consider_update() {
+  int this_frame = ClockObject::get_global_clock()->get_frame_count();
+  if (this_frame != _last_frame_update) {
+    int frame = get_frame();
+    if (_current_frame != frame) {
+      update_frame(frame);
+      _current_frame = frame;
+    } else {
+      // Loop through the pages to see if there's any camera stream to update.
+      int max_z = max(_z_size, (int)_pages.size());
+      for (int z = 0; z < max_z; ++z) {
+        VideoPage &page = _pages[z];
+        if (!page._color.is_from_file() || !page._alpha.is_from_file()) {
+          update_frame(frame, z);
+        }
+      }
+    }
+    _last_frame_update = this_frame;
+  }
+}
+
 ////////////////////////////////////////////////////////////////////
 //     Function: OpenCVTexture::do_make_copy
 //       Access: Protected, Virtual
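
An aside on the hunk above: consider_update() is the usual "at most once per rendered frame" gate, keyed on the global clock's frame counter. Below is a minimal standalone sketch of the same idiom; the class and names are illustrative only, with a plain int standing in for ClockObject::get_global_clock()->get_frame_count().

    // Illustrative sketch only -- not part of the patch.  Mirrors the
    // frame-count gate used by OpenCVTexture::consider_update().
    #include <iostream>

    class FrameGatedUpdater {
    public:
      // Runs the expensive update at most once per rendered frame.
      void consider_update(int this_frame) {
        if (this_frame != _last_frame_update) {
          do_update();
          _last_frame_update = this_frame;
        }
      }

    private:
      void do_update() { std::cout << "updating\n"; }
      int _last_frame_update = -1;
    };

    int main() {
      FrameGatedUpdater updater;
      updater.consider_update(1);
      updater.consider_update(1);  // same frame: skipped
      updater.consider_update(2);  // new frame: runs again
      return 0;
    }
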
@@ -97,23 +125,46 @@ do_assign(const OpenCVTexture &copy) {
 //               is specified) to accept its input from the camera
 //               with the given index number, or the default camera if
 //               the index number is -1 or unspecified.
+//
+//               If alpha_file_channel is 0, then the camera image
+//               becomes a normal RGB texture.  If it is 1, 2, or 3,
+//               then the camera image becomes an alpha texture, using
+//               the indicated channel of the source.
 ////////////////////////////////////////////////////////////////////
 bool OpenCVTexture::
-from_camera(int camera_index, int z, const LoaderOptions &options) {
+from_camera(int camera_index, int z, int alpha_file_channel,
+            const LoaderOptions &options) {
   if (!do_reconsider_z_size(z)) {
     return false;
   }
   nassertr(z >= 0 && z < get_z_size(), false);
 
+  _alpha_file_channel = alpha_file_channel;
+
   VideoPage &page = modify_page(z);
-  page._alpha.clear();
-  if (!page._color.from_camera(camera_index)) {
-    return false;
-  }
-
-  if (!do_reconsider_video_properties(page._color, 3, z, options)) {
+  if (alpha_file_channel == 0) {
+    // A normal RGB texture.
+    page._alpha.clear();
+    if (!page._color.from_camera(camera_index)) {
+      return false;
+    }
+
+    if (!do_reconsider_video_properties(page._color, 3, z, options)) {
+      page._color.clear();
+      return false;
+    }
+  } else {
+    // An alpha texture.
     page._color.clear();
-    return false;
+    if (!page._alpha.from_camera(camera_index)) {
+      return false;
+    }
+
+    if (!do_reconsider_video_properties(page._alpha, 1, z, options)) {
+      page._alpha.clear();
+      return false;
+    }
+    do_set_format(F_alpha);
   }
 
   set_loaded_from_image();
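
A hypothetical usage sketch of the new signature (not part of the patch; the helper functions, header names, and minimal error handling are assumptions): alpha_file_channel == 0 yields an ordinary RGB camera texture, while 1, 2, or 3 selects one source channel and loads it as an F_alpha texture.

    // Hypothetical calling code -- a sketch of the new from_camera() API.
    #include "openCVTexture.h"
    #include "loaderOptions.h"

    // Open the default camera (-1) into page 0 as a normal RGB texture.
    bool open_default_camera(OpenCVTexture *tex) {
      LoaderOptions options;
      return tex->from_camera(-1, 0, 0, options);
    }

    // Open the default camera into page 0 as an alpha-only texture,
    // taking the alpha values from the first source channel.
    bool open_camera_as_alpha(OpenCVTexture *tex) {
      LoaderOptions options;
      return tex->from_camera(-1, 0, 1, options);
    }
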
@@ -161,7 +212,15 @@ do_reconsider_video_properties(const OpenCVTexture::VideoStream &stream,
         << "Loaded " << stream._filename << ", " << num_frames << " frames at "
         << frame_rate << " fps\n";
     }
+  } else {
+    // In this case, we don't have a specific frame rate or number of
+    // frames.  Let both values remain at 0.
+    if (vision_cat.is_debug()) {
+      vision_cat.debug()
+        << "Loaded camera stream\n";
+    }
   }
+
   int width = (int)cvGetCaptureProperty(stream._capture, CV_CAP_PROP_FRAME_WIDTH);
   int height = (int)cvGetCaptureProperty(stream._capture, CV_CAP_PROP_FRAME_HEIGHT);
 
@@ -238,74 +297,91 @@ update_frame(int frame) {
 ////////////////////////////////////////////////////////////////////
 void OpenCVTexture::
 update_frame(int frame, int z) {
-  vision_cat.spam() << "Updating OpenCVTexture page " << z << "\n";
+  if (vision_cat.is_spam()) {
+    vision_cat.spam()
+      << "Updating OpenCVTexture page " << z << "\n";
+  }
+
   VideoPage &page = _pages[z];
   if (page._color.is_valid() || page._alpha.is_valid()) {
     do_modify_ram_image();
+    ++_image_modified;
   }
+  ssize_t dest_x_pitch = _num_components * _component_width;
+  ssize_t dest_y_pitch = _x_size * dest_x_pitch;
+
   if (page._color.is_valid()) {
     nassertv(get_num_components() >= 3 && get_component_width() == 1);
 
-    const unsigned char *source = page._color.get_frame_data(frame);
-    if (source != NULL) {
+    const unsigned char *r, *g, *b;
+    ssize_t x_pitch, y_pitch;
+    if (page._color.get_frame_data(frame, r, g, b, x_pitch, y_pitch)) {
       nassertv(get_video_width() <= _x_size && get_video_height() <= _y_size);
       unsigned char *dest = _ram_images[0]._image.p() + do_get_expected_ram_page_size() * z;
 
-      int dest_row_width = (_x_size * _num_components * _component_width);
-      int source_row_width = get_video_width() * 3;
-
-      if (get_num_components() == 3) {
+      if (_num_components == 3 && x_pitch == 3) {
         // The easy case--copy the whole thing in, row by row.
+        ssize_t copy_bytes = get_video_width() * dest_x_pitch;
+        nassertv(copy_bytes <= dest_y_pitch && copy_bytes <= abs(y_pitch));
+
        for (int y = 0; y < get_video_height(); ++y) {
-          memcpy(dest, source, source_row_width);
-          dest += dest_row_width;
-          source += source_row_width;
+          memcpy(dest, r, copy_bytes);
+          dest += dest_y_pitch;
+          r += y_pitch;
        }
 
      } else {
-        // The harder case--interleave the color in with the alpha,
-        // pixel by pixel.
-        nassertv(get_num_components() == 4);
+        // The harder case--interleave in the color channels, pixel by
+        // pixel, possibly leaving room for alpha.
+
        for (int y = 0; y < get_video_height(); ++y) {
-          int dx = 0;
-          int sx = 0;
+          ssize_t dx = 0;
+          ssize_t sx = 0;
          for (int x = 0; x < get_video_width(); ++x) {
-            dest[dx] = source[sx];
-            dest[dx + 1] = source[sx + 1];
-            dest[dx + 2] = source[sx + 2];
-            dx += 4;
-            sx += 3;
+            dest[dx] = r[sx];
+            dest[dx + 1] = g[sx];
+            dest[dx + 2] = b[sx];
+            dx += dest_x_pitch;
+            sx += x_pitch;
          }
-          dest += dest_row_width;
-          source += source_row_width;
+          dest += dest_y_pitch;
+          r += y_pitch;
+          g += y_pitch;
+          b += y_pitch;
        }
      }
    }
  }
  if (page._alpha.is_valid()) {
-    nassertv(get_num_components() == 4 && get_component_width() == 1);
+    nassertv(get_component_width() == 1);
 
-    const unsigned char *source = page._alpha.get_frame_data(frame);
-    if (source != NULL) {
+    const unsigned char *source[3];
+    ssize_t x_pitch, y_pitch;
+    if (page._alpha.get_frame_data(frame, source[0], source[1], source[2],
+                                   x_pitch, y_pitch)) {
      nassertv(get_video_width() <= _x_size && get_video_height() <= _y_size);
      unsigned char *dest = _ram_images[0]._image.p() + do_get_expected_ram_page_size() * z;
 
-      int dest_row_width = (_x_size * _num_components * _component_width);
-      int source_row_width = get_video_width() * 3;
-
      // Interleave the alpha in with the color, pixel by pixel.
      // Even though the alpha will probably be a grayscale video,
      // the OpenCV library presents it as RGB.
+      const unsigned char *sch = source[0];
+      if (_alpha_file_channel >= 1 && _alpha_file_channel <= 3) {
+        sch = source[_alpha_file_channel - 1];
+      }
+
      for (int y = 0; y < get_video_height(); ++y) {
-        int dx = 3;
-        int sx = _alpha_file_channel;
+        // Start dx at _num_components - 1, which writes to the last
+        // channel, i.e. the alpha channel.
+        ssize_t dx = (_num_components - 1) * _component_width;
+        ssize_t sx = 0;
        for (int x = 0; x < get_video_width(); ++x) {
-          dest[dx] = source[sx];
-          dx += 4;
-          sx += 3;
+          dest[dx] = sch[sx];
+          dx += dest_x_pitch;
+          sx += x_pitch;
        }
-        dest += dest_row_width;
-        source += source_row_width;
+        dest += dest_y_pitch;
+        sch += y_pitch;
      }
    }
  }
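
The copy loops in this hunk all follow one pattern: step through the source using the (x_pitch, y_pitch) strides reported by get_frame_data(), and through the destination using the texture's own (dest_x_pitch, dest_y_pitch), so the same loop handles interleaved, planar, and bottom-up sources. Below is a standalone sketch of that pattern, independent of Texture's RAM-image layout; the function name and the packed RGBA destination format are illustrative assumptions.

    // Sketch only: interleave three pitch-described source channels into
    // a tightly packed, bottom-up RGBA buffer.  A negative y_pitch walks
    // a top-down source from its last row upward.
    #include <cstddef>

    void copy_rgb_to_rgba(unsigned char *dest, int width, int height,
                          const unsigned char *r, const unsigned char *g,
                          const unsigned char *b,
                          std::ptrdiff_t x_pitch, std::ptrdiff_t y_pitch) {
      const std::ptrdiff_t dest_x_pitch = 4;           // R, G, B, A per pixel
      const std::ptrdiff_t dest_y_pitch = width * dest_x_pitch;

      for (int y = 0; y < height; ++y) {
        std::ptrdiff_t dx = 0;
        std::ptrdiff_t sx = 0;
        for (int x = 0; x < width; ++x) {
          dest[dx] = r[sx];
          dest[dx + 1] = g[sx];
          dest[dx + 2] = b[sx];
          dest[dx + 3] = 0xff;                         // opaque alpha
          dx += dest_x_pitch;
          sx += x_pitch;
        }
        dest += dest_y_pitch;
        r += y_pitch;
        g += y_pitch;
        b += y_pitch;
      }
    }
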
@@ -478,14 +554,33 @@ OpenCVTexture::VideoStream::
 ////////////////////////////////////////////////////////////////////
 //     Function: OpenCVTexture::VideoStream::get_frame_data
 //       Access: Public
-//  Description: Returns the pointer to the beginning of the
+//  Description: Gets the data needed to traverse through the
 //               decompressed buffer for the indicated frame number.
 //               It is most efficient to call this in increasing order
-//               of frame number.
+//               of frame number.  Returns true on success, false on
+//               failure.
+//
+//               In the case of a success indication (true return
+//               value), the three pointers r, g, b are loaded with
+//               the addresses of the three components of the
+//               bottom-left pixel of the image.  (They will be
+//               adjacent in memory in the case of an interleaved
+//               image, and separated in the case of a
+//               separate-channel image.)  The x_pitch value is filled
+//               with the amount to add to each pointer to advance to
+//               the pixel to the right; and the y_pitch value is
+//               filled with the amount to add to each pointer to
+//               advance to the pixel above.  Note that these values
+//               may be negative (particularly in the case of a
+//               top-down image).
 ////////////////////////////////////////////////////////////////////
-const unsigned char *OpenCVTexture::VideoStream::
-get_frame_data(int frame) {
-  nassertr(is_valid(), NULL);
+bool OpenCVTexture::VideoStream::
+get_frame_data(int frame,
+               const unsigned char *&r,
+               const unsigned char *&g,
+               const unsigned char *&b,
+               ssize_t &x_pitch, ssize_t &y_pitch) {
+  nassertr(is_valid(), false);
 
   if (is_from_file() && _next_frame != frame) {
     cvSetCaptureProperty(_capture, CV_CAP_PROP_POS_FRAMES, frame);
@@ -494,8 +589,34 @@ get_frame_data(int frame) {
   _next_frame = frame + 1;
   IplImage *image = cvQueryFrame(_capture);
   if (image == NULL) {
-    return NULL;
+    return false;
+  }
+
+  r = (const unsigned char *)image->imageData;
+  g = r + 1;
+  b = g + 1;
+  x_pitch = 3;
+  y_pitch = image->widthStep;
+
+  if (image->dataOrder == 1) {
+    // Separate channel images.  That means a block of r, followed by
+    // a block of g, followed by a block of b.
+    x_pitch = 1;
+    g = r + image->height * y_pitch;
+    b = g + image->height * y_pitch;
   }
 
+  if (image->origin == 0) {
+    // The image data starts with the top row and ends with the bottom
+    // row--the opposite of Texture::_ram_data's storage convention.
+    // Therefore, we must increment the initial pointers to the last
+    // row, and count backwards.
+    r += (image->height - 1) * y_pitch;
+    g += (image->height - 1) * y_pitch;
+    b += (image->height - 1) * y_pitch;
+    y_pitch = -y_pitch;
+  }
+
-  return (const unsigned char *)image->imageData;
+  return true;
 }
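
To make the returned contract concrete, here is a small self-contained sketch (a synthetic buffer stands in for the IplImage): the same five values describe an interleaved image, and switching to bottom-up addressing is just a matter of pointing at the last row and negating y_pitch, as the origin == 0 branch does. The dataOrder == 1 branch would instead set x_pitch to 1 and offset g and b by whole planes.

    // Sketch only -- not part of the patch.
    #include <cstddef>
    #include <iostream>

    int main() {
      const int width = 2, height = 2;
      // Interleaved, top-down: the top row comes first, 3 bytes per pixel.
      unsigned char data[width * height * 3] = {
         10,  20,  30,   40,  50,  60,    // top row
         70,  80,  90,  100, 110, 120};   // bottom row

      const unsigned char *r = data;
      const unsigned char *g = r + 1;
      const unsigned char *b = g + 1;
      std::ptrdiff_t x_pitch = 3;
      std::ptrdiff_t y_pitch = width * 3; // widthStep of a packed image

      // Top-down storage: point at the last row and walk upward.
      r += (height - 1) * y_pitch;
      g += (height - 1) * y_pitch;
      b += (height - 1) * y_pitch;
      y_pitch = -y_pitch;

      // The bottom-left pixel is now simply r[0], g[0], b[0].
      std::cout << int(r[0]) << ", " << int(g[0]) << ", " << int(b[0])
                << "\n";                  // prints 70, 80, 90
      return 0;
    }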