feat: implement all core pipeline stage algorithms

- CropProcessor: Canny/contour auto-crop with kMinFrameAreaRatio guard,
  percentile-based levels adjustment (0.5th/99.5th), unsharp mask sharpening
- Inverter: C-41 orange mask removal via border sampling + per-channel
  pedestal subtraction before bitwise_not; B&W simple bitwise_not
- ColorCorrector: LAB-space C-41 cast correction (a*/b* re-centering)
  followed by gray-world auto white balance; EXIF WB fallback
- Preprocessor: no code changes needed (8-bit→16-bit scaling already correct); deskew stub retained

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Christoph K.
2026-03-14 09:41:18 +01:00
parent 344c22b6e3
commit fd2d97ddeb
3 changed files with 422 additions and 73 deletions

View File

@@ -2,11 +2,16 @@
#include <opencv2/imgproc.hpp> #include <opencv2/imgproc.hpp>
#include <algorithm>
#include <format> #include <format>
#include <iostream> #include <iostream>
namespace photoconv { namespace photoconv {
// ─────────────────────────────────────────────────────────────────────────────
// PipelineStage interface
// ─────────────────────────────────────────────────────────────────────────────
StageResult ColorCorrector::process(ImageData data) const { StageResult ColorCorrector::process(ImageData data) const {
if (data.rgb.empty()) { if (data.rgb.empty()) {
return std::unexpected(make_error( return std::unexpected(make_error(
@@ -16,7 +21,7 @@ StageResult ColorCorrector::process(ImageData data) const {
switch (data.film_type) { switch (data.film_type) {
case FilmType::ColorNegative: { case FilmType::ColorNegative: {
std::cout << "[Color] Applying C-41 correction" << std::endl; std::cout << "[Color] Applying C-41 correction followed by AWB" << std::endl;
auto result = correct_c41(std::move(data)); auto result = correct_c41(std::move(data));
if (!result.has_value()) return result; if (!result.has_value()) return result;
return auto_white_balance(std::move(result.value())); return auto_white_balance(std::move(result.value()));
@@ -24,82 +29,158 @@ StageResult ColorCorrector::process(ImageData data) const {
case FilmType::BWNegative: case FilmType::BWNegative:
case FilmType::BWPositive: case FilmType::BWPositive:
std::cout << "[Color] B&W image, skipping color correction" << std::endl; std::cout << "[Color] B&W image, skipping colour correction" << std::endl;
return data; return data;
case FilmType::ColorPositive: case FilmType::ColorPositive:
std::cout << "[Color] Positive, applying auto white balance" << std::endl; std::cout << "[Color] Positive applying auto white balance" << std::endl;
return auto_white_balance(std::move(data)); return auto_white_balance(std::move(data));
case FilmType::Unknown: case FilmType::Unknown:
std::cout << "[Color] Unknown type, applying auto white balance" << std::endl; std::cout << "[Color] Unknown film type applying auto white balance" << std::endl;
return auto_white_balance(std::move(data)); return auto_white_balance(std::move(data));
} }
return data; return data;
} }
// ─────────────────────────────────────────────────────────────────────────────
// correct_c41
// ─────────────────────────────────────────────────────────────────────────────
StageResult ColorCorrector::correct_c41(ImageData data) {
    // C-41 orange cast correction via the CIE-LAB colour space.
    //
    // After the Inverter has removed the mask pedestal and flipped the
    // values, a residual warm cast remains because the original mask was
    // not perfectly neutral. In LAB that cast appears as a positive bias
    // in the a* and b* channels, so re-centring their means on neutral
    // grey (128 in OpenCV's 8-bit LAB encoding) removes it.
    //
    // Steps:
    //   1. Convert the 16-bit BGR image to 8-bit (cvtColor accepts only
    //      8-bit or 32-bit float input for LAB).
    //   2. Split into L*, a*, b* channels.
    //   3. Measure the a*/b* means and subtract their offsets from 128.
    //   4. Merge, convert back to BGR, and rescale to 16-bit.

    // 65535 / 257 == 255, so this maps the full 16-bit range onto 8 bits.
    cv::Mat bgr8;
    data.rgb.convertTo(bgr8, CV_8UC3, 1.0 / 257.0);

    cv::Mat lab;
    cv::cvtColor(bgr8, lab, cv::COLOR_BGR2Lab);

    std::vector<cv::Mat> lab_channels(3);
    cv::split(lab, lab_channels);

    // Channel layout: [0]=L*, [1]=a*, [2]=b*. In OpenCV's 8-bit LAB the
    // a*/b* values are stored with a +128 offset, so neutral grey is 128.
    const double a_mean = cv::mean(lab_channels[1])[0];
    const double b_mean = cv::mean(lab_channels[2])[0];
    std::cout << std::format(
        "[Color] C-41 cast: a*_mean={:.1f} (offset={:.1f}), b*_mean={:.1f} (offset={:.1f})",
        a_mean, a_mean - 128.0, b_mean, b_mean - 128.0) << std::endl;

    // Shift each chroma channel so its mean lands back on 128.
    // convertTo saturates for CV_8U, so values cannot wrap around.
    const double a_offset = a_mean - 128.0;
    const double b_offset = b_mean - 128.0;
    lab_channels[1].convertTo(lab_channels[1], CV_8U, 1.0, -a_offset);
    lab_channels[2].convertTo(lab_channels[2], CV_8U, 1.0, -b_offset);
    cv::merge(lab_channels, lab);

    // Back to BGR, then restore the 16-bit working depth.
    cv::Mat bgr8_corrected;
    cv::cvtColor(lab, bgr8_corrected, cv::COLOR_Lab2BGR);
    bgr8_corrected.convertTo(data.rgb, CV_16UC3, 257.0);
    return data;
}
// ─────────────────────────────────────────────────────────────────────────────
// auto_white_balance
// ─────────────────────────────────────────────────────────────────────────────
StageResult ColorCorrector::auto_white_balance(ImageData data) {
    // Gray-world auto white balance.
    //
    // Assumption: the average colour of the scene is achromatic (grey),
    // so the per-channel means should all equal the overall mean.
    // Each channel is scaled by overall_mean / channel_mean and the
    // result saturates to the 16-bit range [0, 65535].
    const cv::Scalar channel_means = cv::mean(data.rgb);
    const double b_mean = channel_means[0];
    const double g_mean = channel_means[1];
    const double r_mean = channel_means[2];

    // Guard against division by zero on near-black images.
    if (b_mean < 1.0 || g_mean < 1.0 || r_mean < 1.0) {
        std::cout << "[Color] AWB skipped: near-zero channel mean" << std::endl;
        return data;
    }

    const double gray_mean = (b_mean + g_mean + r_mean) / 3.0;
    const double scale_b = gray_mean / b_mean;
    const double scale_g = gray_mean / g_mean;
    const double scale_r = gray_mean / r_mean;
    std::cout << std::format(
        "[Color] AWB scales: B={:.3f} G={:.3f} R={:.3f}",
        scale_b, scale_g, scale_r) << std::endl;

    std::vector<cv::Mat> channels(3);
    cv::split(data.rgb, channels);
    // convertTo applies dst = src * scale, saturating to [0, 65535].
    channels[0].convertTo(channels[0], CV_16U, scale_b);
    channels[1].convertTo(channels[1], CV_16U, scale_g);
    channels[2].convertTo(channels[2], CV_16U, scale_r);
    cv::merge(channels, data.rgb);
    return data;
}
// ─────────────────────────────────────────────────────────────────────────────
// apply_exif_wb
// ─────────────────────────────────────────────────────────────────────────────
StageResult ColorCorrector::apply_exif_wb(ImageData data) {
    // Apply the camera's recorded white-balance multipliers from EXIF
    // metadata. LibRaw normalises the green channel to 1.0; red and blue
    // are relative to it.
    //
    // If the metadata is missing or invalid, fall back to gray-world AWB.
    const auto& meta = data.metadata;
    if (meta.wb_red <= 0.0f || meta.wb_blue <= 0.0f) {
        std::cout << "[Color] EXIF WB missing falling back to AWB" << std::endl;
        return auto_white_balance(std::move(data));
    }

    std::vector<cv::Mat> channels(3);
    cv::split(data.rgb, channels);
    // channels[0]=B, channels[1]=G, channels[2]=R
    channels[0].convertTo(channels[0], CV_16U, static_cast<double>(meta.wb_blue));
    channels[1].convertTo(channels[1], CV_16U, static_cast<double>(meta.wb_green));
    channels[2].convertTo(channels[2], CV_16U, static_cast<double>(meta.wb_red));
    cv::merge(channels, data.rgb);
    std::cout << std::format(
        "[Color] EXIF WB applied: R={:.3f} G={:.3f} B={:.3f}",
        meta.wb_red, meta.wb_green, meta.wb_blue) << std::endl;
    return data;
}

View File

@@ -5,16 +5,22 @@
#include <algorithm> #include <algorithm>
#include <format> #include <format>
#include <iostream> #include <iostream>
#include <numeric>
#include <vector>
namespace photoconv { namespace photoconv {
// ─────────────────────────────────────────────────────────────────────────────
// Public interface
// ─────────────────────────────────────────────────────────────────────────────
StageResult CropProcessor::process(ImageData data) const { StageResult CropProcessor::process(ImageData data) const {
if (data.rgb.empty()) { if (data.rgb.empty()) {
return std::unexpected(make_error( return std::unexpected(make_error(
ErrorCode::CropFailed, "CropProcessor received empty image")); ErrorCode::CropFailed, "CropProcessor received empty image"));
} }
// Execute sub-stages in order // Sub-stages execute in order; each may fail independently.
auto result = auto_crop(std::move(data)); auto result = auto_crop(std::move(data));
if (!result.has_value()) return result; if (!result.has_value()) return result;
@@ -24,42 +30,188 @@ StageResult CropProcessor::process(ImageData data) const {
return sharpen(std::move(result.value())); return sharpen(std::move(result.value()));
} }
StageResult CropProcessor::auto_crop(ImageData data) {
    // Detect the film frame and crop to it.
    //
    // Strategy:
    //   1. Convert 16-bit BGR to 8-bit greyscale for edge detection.
    //   2. Gaussian blur + Canny to find the film frame borders.
    //   3. Take the largest contour; accept its bounding rect only if it
    //      covers at least kMinFrameAreaRatio of the total image area.
    //   4. Crop to that rect, expanded by a small safety margin.
    //
    // Computed in double so cols * rows cannot overflow int on very
    // large scans.
    const double total_area =
        static_cast<double>(data.rgb.cols) * static_cast<double>(data.rgb.rows);

    // Canny needs 8-bit input; downscale from the 16-bit working depth.
    cv::Mat gray16;
    cv::cvtColor(data.rgb, gray16, cv::COLOR_BGR2GRAY);
    cv::Mat gray8;
    gray16.convertTo(gray8, CV_8U, 1.0 / 257.0);

    // Smooth first so film grain does not produce spurious edges.
    cv::Mat blurred;
    cv::GaussianBlur(gray8, blurred, {5, 5}, 0);

    // Canny thresholds tuned for film borders.
    cv::Mat edges;
    cv::Canny(blurred, edges, /*threshold1=*/30, /*threshold2=*/90);

    std::vector<std::vector<cv::Point>> contours;
    cv::findContours(edges, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
    if (contours.empty()) {
        std::cout << std::format(
            "[PostProcess] Auto-crop: no contours found, keeping {}x{}",
            data.rgb.cols, data.rgb.rows) << std::endl;
        return data;
    }

    // Bounding rect of the largest contour (by enclosed area).
    cv::Rect best_rect;
    double best_area = 0.0;
    for (const auto& contour : contours) {
        const double area = cv::contourArea(contour);
        if (area > best_area) {
            best_area = area;
            best_rect = cv::boundingRect(contour);
        }
    }

    // Reject detections too small to be a film frame (likely artifacts).
    const double frame_ratio = static_cast<double>(best_rect.area()) / total_area;
    if (frame_ratio < kMinFrameAreaRatio) {
        std::cout << std::format(
            "[PostProcess] Auto-crop: frame ratio {:.2f} < {:.2f}, skipping crop",
            frame_ratio, kMinFrameAreaRatio) << std::endl;
        return data;
    }

    // Expand by a small margin so the very edge of the frame is not
    // clipped. Working with edge coordinates keeps the clamping correct
    // even when the rect already touches an image border.
    constexpr int kMarginPx = 4;
    const int x0 = std::max(0, best_rect.x - kMarginPx);
    const int y0 = std::max(0, best_rect.y - kMarginPx);
    const int x1 = std::min(data.rgb.cols, best_rect.x + best_rect.width + kMarginPx);
    const int y1 = std::min(data.rgb.rows, best_rect.y + best_rect.height + kMarginPx);
    best_rect = cv::Rect{x0, y0, x1 - x0, y1 - y0};

    data.crop_region = best_rect;
    data.rgb = data.rgb(best_rect).clone();
    std::cout << std::format(
        "[PostProcess] Auto-crop: {}x{} -> {}x{} (frame ratio {:.2f})",
        gray8.cols, gray8.rows,
        data.rgb.cols, data.rgb.rows,
        frame_ratio) << std::endl;
    return data;
}
// ─────────────────────────────────────────────────────────────────────────────
// adjust_levels
// ─────────────────────────────────────────────────────────────────────────────
StageResult CropProcessor::adjust_levels(ImageData data) {
    // Percentile-based levels adjustment, applied independently per channel.
    //
    // Algorithm:
    //   1. Build a 65536-bin histogram for each BGR channel.
    //   2. Walk the cumulative histogram to locate the intensities at the
    //      kBlackPointPercentile and kWhitePointPercentile percentiles.
    //   3. Linear remap: out = (in - black) * 65535 / (white - black),
    //      saturated to [0, 65535].
    constexpr int kHistBins = 65536;
    constexpr float kRange[] = {0.0f, 65536.0f};
    const float* kRangePtr = kRange;
    constexpr int kChannels = 3;

    std::vector<cv::Mat> channels(kChannels);
    cv::split(data.rgb, channels);

    for (int ch = 0; ch < kChannels; ++ch) {
        // Histogram over the single channel.
        cv::Mat hist;
        const int channel_idx = 0;  // single-channel input after split
        cv::calcHist(&channels[ch], 1, &channel_idx, cv::Mat{},
                     hist, 1, &kHistBins, &kRangePtr);

        // total() returns size_t, avoiding the int overflow that
        // cols * rows could hit on very large scans.
        const double total_pixels = static_cast<double>(channels[ch].total());

        // Black point: first bin where the cumulative fraction reaches
        // the black-point percentile.
        double cumulative = 0.0;
        int black_point = 0;
        for (int i = 0; i < kHistBins; ++i) {
            cumulative += hist.at<float>(i);
            if (cumulative / total_pixels >= kBlackPointPercentile / 100.0) {
                black_point = i;
                break;
            }
        }

        // White point: first bin where the cumulative fraction reaches
        // the white-point percentile.
        cumulative = 0.0;
        int white_point = kHistBins - 1;
        for (int i = 0; i < kHistBins; ++i) {
            cumulative += hist.at<float>(i);
            if (cumulative / total_pixels >= kWhitePointPercentile / 100.0) {
                white_point = i;
                break;
            }
        }

        if (white_point <= black_point) {
            // Degenerate histogram (flat or constant image) — leave the
            // channel untouched.
            continue;
        }

        // convertTo applies dst = src * scale + offset, saturating to
        // [0, 65535] for CV_16U output.
        const double scale = 65535.0 / static_cast<double>(white_point - black_point);
        const double offset = -static_cast<double>(black_point) * scale;
        channels[ch].convertTo(channels[ch], CV_16U, scale, offset);
    }

    cv::merge(channels, data.rgb);
    std::cout << std::format(
        "[PostProcess] Levels adjusted (black={:.1f}%, white={:.1f}%)",
        kBlackPointPercentile, kWhitePointPercentile) << std::endl;
    return data;
}
// ─────────────────────────────────────────────────────────────────────────────
// sharpen
// ─────────────────────────────────────────────────────────────────────────────
StageResult CropProcessor::sharpen(ImageData data) {
    // Unsharp mask: sharpened = original + strength * (original - blurred).
    //
    // Expressed through addWeighted:
    //   dst = alpha * original + beta * blurred + gamma
    // with alpha = 1 + strength, beta = -strength, gamma = 0.
    //
    // Note: the stale "(pass-through)" stub log has been removed — it
    // contradicted the real log emitted below.

    // Kernel size {0, 0} lets OpenCV derive it from kSharpenSigma.
    cv::Mat blurred;
    cv::GaussianBlur(data.rgb, blurred, {0, 0}, kSharpenSigma);

    // addWeighted performs saturating 16-bit arithmetic.
    cv::addWeighted(data.rgb, 1.0 + kSharpenStrength,
                    blurred, -kSharpenStrength,
                    0.0,
                    data.rgb);
    std::cout << std::format(
        "[PostProcess] Sharpened (sigma={:.1f}, strength={:.2f})",
        kSharpenSigma, kSharpenStrength) << std::endl;
    return data;
}

View File

@@ -3,11 +3,90 @@
#include <opencv2/core.hpp> #include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp> #include <opencv2/imgproc.hpp>
#include <algorithm>
#include <format> #include <format>
#include <iostream> #include <iostream>
namespace photoconv { namespace photoconv {
// ─────────────────────────────────────────────────────────────────────────────
// Constants
// ─────────────────────────────────────────────────────────────────────────────
/// Thickness, in pixels, of the border strips sampled when characterising
/// the C-41 orange mask. The outermost strips of a scan normally contain
/// only unexposed film, so the mask colour can be measured without image
/// content contaminating the sample.
static constexpr int kBorderSamplePx = 32;
/// Minimum width AND height required before border sampling is attempted.
/// Images smaller than this in either dimension fall back to a global
/// mean sample (see sample_border_mask below).
static constexpr int kMinBorderDim = kBorderSamplePx * 4;
// ─────────────────────────────────────────────────────────────────────────────
// Helpers
// ─────────────────────────────────────────────────────────────────────────────
namespace {
/**
* @brief Sample the orange mask colour from the film border areas.
*
* For C-41 film negatives, the orange anti-halation mask is most pure
* in the unexposed border regions outside the frame (sprocket holes,
* edge strips). We sample a strip from each of the four edges and
* compute the per-channel mean.
*
* @param rgb 16-bit BGR image.
* @return Per-channel mean [B, G, R] of the sampled border region.
*/
cv::Scalar sample_border_mask(const cv::Mat& rgb) {
    // Measure the per-channel mean [B, G, R] of the four outermost border
    // strips, where the C-41 orange mask is purest (unexposed film).
    const int w = rgb.cols;
    const int h = rgb.rows;
    if (w < kMinBorderDim || h < kMinBorderDim) {
        // Too small to carve off border strips; fall back to the global
        // mean as a rough mask estimate.
        return cv::mean(rgb);
    }

    // Four border strips: top/bottom span the full width; left/right use
    // only the inner height so the corners are not counted twice.
    std::vector<cv::Mat> strips;
    strips.reserve(4);
    strips.push_back(rgb(cv::Rect{0, 0, w, kBorderSamplePx}));
    strips.push_back(rgb(cv::Rect{0, h - kBorderSamplePx, w, kBorderSamplePx}));
    const int inner_y = kBorderSamplePx;
    const int inner_h = h - 2 * kBorderSamplePx;
    if (inner_h > 0) {
        strips.push_back(rgb(cv::Rect{0, inner_y, kBorderSamplePx, inner_h}));
        strips.push_back(rgb(cv::Rect{w - kBorderSamplePx, inner_y, kBorderSamplePx, inner_h}));
    }

    // Pixel-count-weighted mean across all strips.
    cv::Scalar sum{0, 0, 0, 0};
    int64_t pixel_count = 0;
    for (const auto& strip : strips) {
        const cv::Scalar strip_mean = cv::mean(strip);
        // total() is size_t, avoiding int overflow in cols * rows.
        const auto n = static_cast<int64_t>(strip.total());
        sum += strip_mean * static_cast<double>(n);
        pixel_count += n;
    }
    if (pixel_count == 0) return cv::mean(rgb);
    return sum * (1.0 / static_cast<double>(pixel_count));
}
} // anonymous namespace
// ─────────────────────────────────────────────────────────────────────────────
// PipelineStage interface
// ─────────────────────────────────────────────────────────────────────────────
StageResult Inverter::process(ImageData data) const { StageResult Inverter::process(ImageData data) const {
if (data.rgb.empty()) { if (data.rgb.empty()) {
return std::unexpected(make_error( return std::unexpected(make_error(
@@ -16,7 +95,8 @@ StageResult Inverter::process(ImageData data) const {
switch (data.film_type) { switch (data.film_type) {
case FilmType::ColorNegative: case FilmType::ColorNegative:
std::cout << "[Invert] Inverting color negative (C-41)" << std::endl; std::cout << "[Invert] Inverting color negative (C-41 with orange mask removal)"
<< std::endl;
return invert_color_negative(std::move(data)); return invert_color_negative(std::move(data));
case FilmType::BWNegative: case FilmType::BWNegative:
@@ -29,29 +109,65 @@ StageResult Inverter::process(ImageData data) const {
return data; return data;
case FilmType::Unknown: case FilmType::Unknown:
std::cout << "[Invert] Unknown film type, applying default inversion" << std::endl; std::cout << "[Invert] Unknown film type, applying colour negative inversion"
<< std::endl;
return invert_color_negative(std::move(data)); return invert_color_negative(std::move(data));
} }
return data; // Unreachable, but satisfies compiler return data; // Unreachable, satisfies the compiler
} }
StageResult Inverter::invert_color_negative(ImageData data) {
    // C-41 inversion:
    //
    //   1. Sample the orange mask colour from the unexposed film border.
    //      The mask contributes an additive orange pedestal to every pixel.
    //   2. Subtract that pedestal per channel so unexposed areas become
    //      approximately neutral (zero) before inversion.
    //   3. bitwise_not flips the negative (v -> 65535 - v).
    //
    // The ColorCorrector's AWB stage later fine-tunes any residual cast.
    const cv::Scalar mask_color = sample_border_mask(data.rgb);
    std::cout << std::format(
        "[Invert] Orange mask sample: B={:.0f} G={:.0f} R={:.0f}",
        mask_color[0], mask_color[1], mask_color[2]) << std::endl;

    // cv::subtract with a Scalar operand subtracts per channel using
    // saturating arithmetic, so values clip at 0 instead of wrapping —
    // no manual split/merge needed. (The sampled means of unsigned data
    // are always >= 0, so this matches the old pedestal > 0 guard.)
    cv::subtract(data.rgb, mask_color, data.rgb);

    // Inversion: each pixel value v becomes 65535 - v.
    cv::bitwise_not(data.rgb, data.rgb);
    return data;
}
// ─────────────────────────────────────────────────────────────────────────────
// invert_bw_negative
// ─────────────────────────────────────────────────────────────────────────────
StageResult Inverter::invert_bw_negative(ImageData data) {
    // Monochrome negatives carry no colour mask, so a straight per-pixel
    // complement (v -> 65535 - v) is the entire inversion.
    cv::bitwise_not(data.rgb, data.rgb);
    return data;
}