Updates to hls::vision library: #16

Open · wants to merge 12 commits into base: main
4 changes: 4 additions & 0 deletions .gitignore
@@ -0,0 +1,4 @@
**/hls_output
vision/demo_designs/PF_Video_kit/libero/vision_pipeline
vision/precompiled_sw_libraries/ffmpeg4.4-x86_64
vision/precompiled_sw_libraries/opencv4.5.4-x86_64
115 changes: 64 additions & 51 deletions vision/examples/debayer/debayer.cpp
@@ -1,9 +1,6 @@
#include "common/params.hpp"
#include "vision.hpp"
#include <opencv2/opencv.hpp>

using namespace hls;
using cv::Mat;
using vision::Img;
using vision::PixelType;
using vision::StorageType;
@@ -16,77 +13,93 @@ using vision::StorageType;
#define HEIGHT 1080
#endif

#define SIZE (WIDTH * HEIGHT)

//
// RGB and Bayer image types with 4 Pixels Per Clock (4PPC)
// Processing 4 pixels per clock lets the hardware run at a lower clock
// frequency. This is particularly useful when dealing with 4K data, but it
// also lowers the required Fmax at other resolutions, making hardware
// implementation easier.
//
using RgbImgT4PPC =
Img<PixelType::HLS_8UC3, HEIGHT, WIDTH, StorageType::FIFO, vision::NPPC_4>;
Img<PixelType::HLS_8UC3, HEIGHT, WIDTH, StorageType::FIFO, vision::NPPC_4>;
using BayerImgT4PPC =
Img<PixelType::HLS_8UC1, HEIGHT, WIDTH, StorageType::FIFO, vision::NPPC_4>;

template <PixelType PIXEL_T_IN, PixelType PIXEL_T_OUT, unsigned H, unsigned W,
StorageType STORAGE_IN, StorageType STORAGE_OUT,
vision::NumPixelsPerCycle NPPC = vision::NPPC_1>
void DeBayerWrapper(vision::Img<PIXEL_T_IN, H, W, STORAGE_IN, NPPC> &ImgIn,
vision::Img<PIXEL_T_OUT, H, W, STORAGE_OUT, NPPC> &ImgOut,
vision::BayerFormat Format = vision::BayerFormat::RGGB) {
#pragma HLS function top
vision::DeBayer(ImgIn, ImgOut, Format);
}
Img<PixelType::HLS_8UC1, HEIGHT, WIDTH, StorageType::FIFO, vision::NPPC_4>;
using RgbImgT4PPC8b =
Img<PixelType::HLS_8UC3, HEIGHT, WIDTH, StorageType::FIFO, vision::NPPC_4>;
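
//
// Rough numbers behind the 4PPC comment above (illustrative sketch, not part
// of this change): standard 1080p60 timing uses a pixel clock of about
// 148.5 MHz once horizontal and vertical blanking are included, so processing
// 4 pixels per cycle cuts the required Fmax to roughly 148.5 / 4 ~= 37 MHz:
//
//   constexpr float PixelClockMHz   = 148.5f;            // 1080p60, incl. blanking
//   constexpr float RequiredFmaxMHz = PixelClockMHz / 4; // ~37.1 MHz at 4 PPC
//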

template <PixelType PIXEL_T_IN, PixelType PIXEL_T_OUT, unsigned H, unsigned W,
StorageType STORAGE = vision::FIFO,
vision::NumPixelsPerCycle NPPC = vision::NPPC_1>
void RGB2BayerWrapper(vision::Img<PIXEL_T_IN, H, W, STORAGE, NPPC> &ImgIn,
vision::Img<PIXEL_T_OUT, H, W, STORAGE, NPPC> &ImgOut) {
#pragma HLS function top
vision::RGB2Bayer(ImgIn, ImgOut);
//------------------------------------------------------------------------------
// This function converts the input image to Bayer and back. This is done
// mainly to test the DeBayer function, which is what would be most commonly
// used in a real design.
//
// 3 Channel BGR -> 1 Channel Bayer BGGR -> 3 Channel BGR
//
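// For reference (illustration, not in the original comment): the repeating
// 2x2 tile of the BGGR pattern used here is
//
//   B G
//   G R
//
// and the DeBayer step reconstructs the two missing channels of every pixel
// by interpolating from neighbouring Bayer samples.
//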
template <
PixelType PIXEL_T_IN,
PixelType PIXEL_T_OUT,
unsigned H,
unsigned W,
StorageType STORAGE_IN,
StorageType STORAGE_OUT,
vision::NumPixelsPerCycle NPPC
>
void BayerDeBayerWrapper (
vision::Img<PIXEL_T_IN, H, W, STORAGE_IN, NPPC> &InImg,
vision::Img<PIXEL_T_OUT, H, W, STORAGE_OUT, NPPC> &OutImg,
vision::BayerFormat Format) {
#pragma HLS function top dataflow
BayerImgT4PPC BayerImg;
vision::BGR2Bayer(InImg, BayerImg, Format);
vision::DeBayer(BayerImg, OutImg, Format);
}

//------------------------------------------------------------------------------
int main(int argc, char* argv[]) {
RgbImgT4PPC InImg, OutImg, DeBayerGoldImg;
RgbImgT4PPC InImg, OutImg;
BayerImgT4PPC BayerImg;

// Read input image into a cv Mat and convert it to RGB format since cv
// reads images in BGR format
std::string INPUT_IMAGE=argv[1];
Mat RGBInMat = cv::imread(INPUT_IMAGE, cv::IMREAD_COLOR);

// Convert the cv Mat into Img class
convertFromCvMat(RGBInMat, InImg);
// Process the input, converting RGB to bayer and bayer back to RGB
RGB2BayerWrapper(InImg, BayerImg);
DeBayerWrapper(BayerImg, OutImg, vision::BayerFormat::RGGB);
Mat BGRInMat = cv::imread(INPUT_IMAGE, cv::IMREAD_COLOR);
if (BGRInMat.empty()) {
printf("Error: Could not open file: %s\n.", INPUT_IMAGE.c_str());
return 1;
}

//
// Convert the cvMat to SmartHLS Image and process it
//
vision::convertFromCvMat(BGRInMat, InImg);
BayerDeBayerWrapper(InImg, OutImg, vision::BayerFormat::BGGR);

//
// Convert the output image to cv Mat for comparing with the input image.
// As we convert the BGR input image to Bayer format and then back to BGR,
// the input and output images should be similar.
// Also, write the result to a file for visual inspection.
//
Mat OutMat;
vision::convertToCvMat(OutImg, OutMat);
cv::imwrite("hls_out.png", OutMat);

// Compare output image with golden image. They should completely match
//
// Compare the SmartHLS debayer output image with the golden reference of the
// debayered image. They should match exactly.
//
std::string GOLDEN_OUTPUT=argv[2];
Mat RGBGoldenMat = cv::imread(GOLDEN_OUTPUT, cv::IMREAD_COLOR);
float ErrPercentGolden = vision::compareMat(RGBGoldenMat, OutMat, 0);
Mat BGRGoldenMat = cv::imread(GOLDEN_OUTPUT, cv::IMREAD_COLOR);
float ErrPercentGolden = vision::compareMat(BGRGoldenMat, OutMat, 0);
printf("ErrPercentGolden: %0.2lf%\n", ErrPercentGolden);

// Compare the output image with the input image
// Converting RGB to bayer format results in 3x reduction in data size, and
// converting back from bayer to RGB (using debayer function) will result in
// mismatch.
// So we will say pass as long as less than 5% of pixels (each channel) have
//
// Compare the SmartHLS debayered output image with the input image.
// Converting to Bayer format results in a 3x reduction in data size, so
// converting back from Bayer (using the debayer function) will not match the
// input exactly. We report PASS as long as less than 10% of pixels (in each
// channel) have a mismatch greater than 32 (in the 0-255 range).
float ErrPercent = vision::compareMat(RGBInMat, OutMat, 32);
cv::imwrite("hls_out.png", OutMat);
//
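// For scale (illustration): a per-channel threshold of 32 out of 255 is a
// tolerance of roughly 12.5%, and up to 10% of the pixel values in each
// channel may exceed it before the test reports FAIL.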
float ErrPercent = vision::compareMat(BGRInMat, OutMat, 32);
printf("Percentage of over threshold In vs Out: %0.2lf%\n", ErrPercent);
if (ErrPercent < 5 && ErrPercentGolden == 0.0) {
printf("PASS");
return 0;
}
printf("FAIL");
return 1;

int error = (ErrPercent > 10 || ErrPercentGolden != 0.0);
printf("%s\n", error ? "FAIL" : "PASS");
return error;
}
82 changes: 82 additions & 0 deletions vision/include/common/common.hpp
@@ -23,6 +23,7 @@

#include "params.hpp"
#include <hls/streaming.hpp>
#include <hls/ap_fixpt.hpp>

#ifndef __SYNTHESIS__
#define LEGUP_SW_IMPL(code) code
@@ -205,6 +206,7 @@ class Img {
LEGUP_SW_IMPL(assert(h <= H));
height = h;
}

void set_width(unsigned w) {
LEGUP_SW_IMPL(assert(w <= W));
width = w;
@@ -225,6 +227,7 @@
data.setDepth(fifo_depth);
#endif
};

template <
StorageType st = STORAGE,
typename std::enable_if<st != StorageType::FIFO>::type * = nullptr>
@@ -311,6 +314,85 @@ class Img {
}
return is_equal;
});



/**
* @function ConvertTo
* Similar to OpenCV's convertTo. This function applies a scale factor and an
* offset to each pixel of the image:
*
* $$ outPix = beta + alpha * inPix $$
*
* Both "beta" and "alpha" parameters are converted to fixed point.
*
* NOTE that the internal "tmp" variable uses the output pixel's width for the
* fixed point representation with AP_SAT to avoid overflows in the returned value.
* The "alpha" and "beta" parameters use the input pixel's width for
* the fixed point representation. The number of fractional bits is set
* arbitrarily to 8. We just need a few fractinal bits to multiply the pixel by
* factors less than 1.
*
* @param {vision::Img OutImg} Output image. The NPPC must match the source
* image
* @param {float alpha} optional scale factor applied to the input pixel.
* alpha = 1 by default.
* @param {float beta} optional factor added to the scaled input pixel.
* beta = 0 by default.
*
*/
template <
PixelType PIXEL_T_O,
unsigned H_O,
unsigned W_O,
StorageType STORAGE_O = FIFO,
NumPixelsPerCycle NPPC_O = NPPC_1
>
void ConvertTo (
vision::Img<PIXEL_T_O, H_O, W_O, STORAGE_O, NPPC_O> &OutImg,
float alpha = 1.0,
float beta = 0.0
) {
#pragma HLS memory partition argument(OutImg) type(struct_fields)

OutImg.set_height(get_height());
OutImg.set_width(get_width());

const unsigned ChWidth = DT<PIXEL_T, NPPC>::PerChannelPixelWidth;
const unsigned PixelWordWidth = DT<PIXEL_T, NPPC>::W / NPPC;
const unsigned OutChWidth = DT<PIXEL_T_O, NPPC_O>::PerChannelPixelWidth;

const unsigned InNumChannels = DT<PIXEL_T>::NumChannels;
const unsigned OutNumChannels = DT<PIXEL_T_O>::NumChannels;
const unsigned NumPixels = height * width / NPPC;

static_assert(NPPC_O == NPPC,
"Error. The NPPC must be the same for both images");

static_assert(InNumChannels == OutNumChannels,
"Error. The number of channels must be the same for both images");

HLS_VISION_CONVERTO_LOOP:
#pragma HLS loop pipeline
for (unsigned k = 0; k < NumPixels; k++) {
DATA_T_ ImgdataIn = read(k);
typename DT<PIXEL_T_O, NPPC_O>::T ImgdataOut;
for (unsigned p = 0; p < NPPC; p++) {
DATA_T_ InPixel = ImgdataIn.byte(p, DT<PIXEL_T,NPPC>::W);
typename DT<PIXEL_T_O>::T OutPixel;
#pragma HLS loop unroll
for (int c = 0; c < InNumChannels; c++) {
ap_ufixpt<OutChWidth, OutChWidth, AP_RND, AP_SAT> tmp;
tmp = ap_ufixpt<ChWidth, ChWidth, AP_RND, AP_SAT>(beta) +
ap_ufixpt<ChWidth + 8, ChWidth, AP_RND, AP_SAT>(alpha) *
ap_ufixpt<ChWidth, ChWidth, AP_RND, AP_SAT>(InPixel.byte(c, ChWidth));
OutPixel.byte(c, OutChWidth) = tmp;
}
ImgdataOut.byte(p, PixelWordWidth) = OutPixel;
}
OutImg.write(ImgdataOut, k);
}
}
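
// Usage sketch for ConvertTo (illustrative only, not part of this change):
// halve the brightness of an 8-bit, single-channel frame and add an offset
// of 16, i.e. outPix = 16 + 0.5 * inPix, saturated to the output channel
// width:
//
//   vision::Img<vision::PixelType::HLS_8UC1, 1080, 1920,
//               vision::StorageType::FIFO, vision::NPPC_1> In, Out;
//   // ... fill In ...
//   In.ConvertTo(Out, /*alpha=*/0.5f, /*beta=*/16.0f);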
};

} // End of namespace vision.