diff --git a/extern/CMakeLists.txt b/extern/CMakeLists.txt
index 55ced4d5cc1..24ec3c107b8 100644
--- a/extern/CMakeLists.txt
+++ b/extern/CMakeLists.txt
@@ -17,7 +17,11 @@ endif()
 add_subdirectory(rangetree)
 add_subdirectory(wcwidth)
 
-add_subdirectory(perceptualdiff)
+#FRL_CLR_BEGIN
+if(UNIX AND NOT APPLE)
+  add_subdirectory(perceptualdiff)
+endif()
+#FRL_CLR_END
 
 if(WITH_BULLET)
   if(NOT WITH_SYSTEM_BULLET)
diff --git a/extern/nvidia/Video_Codec_SDK_11.1.5/Deprecation_Notices.pdf b/extern/nvidia/Video_Codec_SDK_11.1.5/Deprecation_Notices.pdf
new file mode 100644
index 00000000000..41fd323d908
Binary files /dev/null and b/extern/nvidia/Video_Codec_SDK_11.1.5/Deprecation_Notices.pdf differ
diff --git a/extern/nvidia/Video_Codec_SDK_11.1.5/Interface/cuviddec.h b/extern/nvidia/Video_Codec_SDK_11.1.5/Interface/cuviddec.h
new file mode 100644
index 00000000000..0a423d6e2ac
--- /dev/null
+++ b/extern/nvidia/Video_Codec_SDK_11.1.5/Interface/cuviddec.h
@@ -0,0 +1,1201 @@
+/*
+ * This copyright notice applies to this header file only:
+ *
+ * Copyright (c) 2010-2021 NVIDIA Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the software, and to permit persons to whom the
+ * software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*****************************************************************************************************/
+//! \file cuviddec.h
+//! NVDECODE API provides video decoding interface to NVIDIA GPU devices.
+//! This file contains constants, structure definitions and function prototypes used for decoding.
+/*****************************************************************************************************/
+
+#if !defined(__CUDA_VIDEO_H__)
+#define __CUDA_VIDEO_H__
+
+#ifndef __cuda_cuda_h__
+
+//FRL_CLR_BEGIN
+#ifdef WITH_CUDA_DYNLOAD
+  #include <cuew.h>
+  // Do not use CUDA SDK headers when using CUEW
+  // The macro below is used by Optix SDK and is necessary to avoid DSO loading collision
+  // See device_optix.cpp for example.
+  #define OPTIX_DONT_INCLUDE_CUDA
+#else
+  #include <cuda.h>
+#endif
+//FRL_CLR_END
+
+#endif // __cuda_cuda_h__
+
+#if defined(_WIN64) || defined(__LP64__) || defined(__x86_64) || defined(AMD64) || defined(_M_AMD64)
+#if (CUDA_VERSION >= 3020) && (!defined(CUDA_FORCE_API_VERSION) || (CUDA_FORCE_API_VERSION >= 3020))
+#define __CUVID_DEVPTR64
+#endif
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif /* __cplusplus */
+
+typedef void *CUvideodecoder;
+typedef struct _CUcontextlock_st *CUvideoctxlock;
+
+/*********************************************************************************/
+//! \enum cudaVideoCodec
+//! 
Video codec enums +//! These enums are used in CUVIDDECODECREATEINFO and CUVIDDECODECAPS structures +/*********************************************************************************/ +typedef enum cudaVideoCodec_enum { + cudaVideoCodec_MPEG1=0, /**< MPEG1 */ + cudaVideoCodec_MPEG2, /**< MPEG2 */ + cudaVideoCodec_MPEG4, /**< MPEG4 */ + cudaVideoCodec_VC1, /**< VC1 */ + cudaVideoCodec_H264, /**< H264 */ + cudaVideoCodec_JPEG, /**< JPEG */ + cudaVideoCodec_H264_SVC, /**< H264-SVC */ + cudaVideoCodec_H264_MVC, /**< H264-MVC */ + cudaVideoCodec_HEVC, /**< HEVC */ + cudaVideoCodec_VP8, /**< VP8 */ + cudaVideoCodec_VP9, /**< VP9 */ + cudaVideoCodec_AV1, /**< AV1 */ + cudaVideoCodec_NumCodecs, /**< Max codecs */ + // Uncompressed YUV + cudaVideoCodec_YUV420 = (('I'<<24)|('Y'<<16)|('U'<<8)|('V')), /**< Y,U,V (4:2:0) */ + cudaVideoCodec_YV12 = (('Y'<<24)|('V'<<16)|('1'<<8)|('2')), /**< Y,V,U (4:2:0) */ + cudaVideoCodec_NV12 = (('N'<<24)|('V'<<16)|('1'<<8)|('2')), /**< Y,UV (4:2:0) */ + cudaVideoCodec_YUYV = (('Y'<<24)|('U'<<16)|('Y'<<8)|('V')), /**< YUYV/YUY2 (4:2:2) */ + cudaVideoCodec_UYVY = (('U'<<24)|('Y'<<16)|('V'<<8)|('Y')) /**< UYVY (4:2:2) */ +} cudaVideoCodec; + +/*********************************************************************************/ +//! \enum cudaVideoSurfaceFormat +//! Video surface format enums used for output format of decoded output +//! These enums are used in CUVIDDECODECREATEINFO structure +/*********************************************************************************/ +typedef enum cudaVideoSurfaceFormat_enum { + cudaVideoSurfaceFormat_NV12=0, /**< Semi-Planar YUV [Y plane followed by interleaved UV plane] */ + cudaVideoSurfaceFormat_P016=1, /**< 16 bit Semi-Planar YUV [Y plane followed by interleaved UV plane]. + Can be used for 10 bit(6LSB bits 0), 12 bit (4LSB bits 0) */ + cudaVideoSurfaceFormat_YUV444=2, /**< Planar YUV [Y plane followed by U and V planes] */ + cudaVideoSurfaceFormat_YUV444_16Bit=3, /**< 16 bit Planar YUV [Y plane followed by U and V planes]. + Can be used for 10 bit(6LSB bits 0), 12 bit (4LSB bits 0) */ +} cudaVideoSurfaceFormat; + +/******************************************************************************************************************/ +//! \enum cudaVideoDeinterlaceMode +//! Deinterlacing mode enums +//! These enums are used in CUVIDDECODECREATEINFO structure +//! Use cudaVideoDeinterlaceMode_Weave for progressive content and for content that doesn't need deinterlacing +//! cudaVideoDeinterlaceMode_Adaptive needs more video memory than other DImodes +/******************************************************************************************************************/ +typedef enum cudaVideoDeinterlaceMode_enum { + cudaVideoDeinterlaceMode_Weave=0, /**< Weave both fields (no deinterlacing) */ + cudaVideoDeinterlaceMode_Bob, /**< Drop one field */ + cudaVideoDeinterlaceMode_Adaptive /**< Adaptive deinterlacing */ +} cudaVideoDeinterlaceMode; + +/**************************************************************************************************************/ +//! \enum cudaVideoChromaFormat +//! Chroma format enums +//! 
These enums are used in CUVIDDECODECREATEINFO and CUVIDDECODECAPS structures +/**************************************************************************************************************/ +typedef enum cudaVideoChromaFormat_enum { + cudaVideoChromaFormat_Monochrome=0, /**< MonoChrome */ + cudaVideoChromaFormat_420, /**< YUV 4:2:0 */ + cudaVideoChromaFormat_422, /**< YUV 4:2:2 */ + cudaVideoChromaFormat_444 /**< YUV 4:4:4 */ +} cudaVideoChromaFormat; + +/*************************************************************************************************************/ +//! \enum cudaVideoCreateFlags +//! Decoder flag enums to select preferred decode path +//! cudaVideoCreate_Default and cudaVideoCreate_PreferCUVID are most optimized, use these whenever possible +/*************************************************************************************************************/ +typedef enum cudaVideoCreateFlags_enum { + cudaVideoCreate_Default = 0x00, /**< Default operation mode: use dedicated video engines */ + cudaVideoCreate_PreferCUDA = 0x01, /**< Use CUDA-based decoder (requires valid vidLock object for multi-threading) */ + cudaVideoCreate_PreferDXVA = 0x02, /**< Go through DXVA internally if possible (requires D3D9 interop) */ + cudaVideoCreate_PreferCUVID = 0x04 /**< Use dedicated video engines directly */ +} cudaVideoCreateFlags; + + +/*************************************************************************/ +//! \enum cuvidDecodeStatus +//! Decode status enums +//! These enums are used in CUVIDGETDECODESTATUS structure +/*************************************************************************/ +typedef enum cuvidDecodeStatus_enum +{ + cuvidDecodeStatus_Invalid = 0, // Decode status is not valid + cuvidDecodeStatus_InProgress = 1, // Decode is in progress + cuvidDecodeStatus_Success = 2, // Decode is completed without any errors + // 3 to 7 enums are reserved for future use + cuvidDecodeStatus_Error = 8, // Decode is completed with an error (error is not concealed) + cuvidDecodeStatus_Error_Concealed = 9, // Decode is completed with an error and error is concealed +} cuvidDecodeStatus; + +/**************************************************************************************************************/ +//! \struct CUVIDDECODECAPS; +//! 
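+//! A minimal query sketch (illustrative only; cuvidGetDecoderCaps() is declared
+//! further down in this header, and error handling is elided):
+//!
+//!   CUVIDDECODECAPS caps = { 0 };
+//!   caps.eCodecType     = cudaVideoCodec_HEVC;
+//!   caps.eChromaFormat  = cudaVideoChromaFormat_420;
+//!   caps.nBitDepthMinus8 = 2;                 // 10-bit
+//!   cuvidGetDecoderCaps(&caps);
+//!   if (!caps.bIsSupported) { /* fall back to software decode */ }
+//! 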
This structure is used in cuvidGetDecoderCaps API +/**************************************************************************************************************/ +typedef struct _CUVIDDECODECAPS +{ + cudaVideoCodec eCodecType; /**< IN: cudaVideoCodec_XXX */ + cudaVideoChromaFormat eChromaFormat; /**< IN: cudaVideoChromaFormat_XXX */ + unsigned int nBitDepthMinus8; /**< IN: The Value "BitDepth minus 8" */ + unsigned int reserved1[3]; /**< Reserved for future use - set to zero */ + + unsigned char bIsSupported; /**< OUT: 1 if codec supported, 0 if not supported */ + unsigned char nNumNVDECs; /**< OUT: Number of NVDECs that can support IN params */ + unsigned short nOutputFormatMask; /**< OUT: each bit represents corresponding cudaVideoSurfaceFormat enum */ + unsigned int nMaxWidth; /**< OUT: Max supported coded width in pixels */ + unsigned int nMaxHeight; /**< OUT: Max supported coded height in pixels */ + unsigned int nMaxMBCount; /**< OUT: Max supported macroblock count + CodedWidth*CodedHeight/256 must be <= nMaxMBCount */ + unsigned short nMinWidth; /**< OUT: Min supported coded width in pixels */ + unsigned short nMinHeight; /**< OUT: Min supported coded height in pixels */ + unsigned char bIsHistogramSupported; /**< OUT: 1 if Y component histogram output is supported, 0 if not + Note: histogram is computed on original picture data before + any post-processing like scaling, cropping, etc. is applied */ + unsigned char nCounterBitDepth; /**< OUT: histogram counter bit depth */ + unsigned short nMaxHistogramBins; /**< OUT: Max number of histogram bins */ + unsigned int reserved3[10]; /**< Reserved for future use - set to zero */ +} CUVIDDECODECAPS; + +/**************************************************************************************************************/ +//! \struct CUVIDDECODECREATEINFO +//! This structure is used in cuvidCreateDecoder API +/**************************************************************************************************************/ +typedef struct _CUVIDDECODECREATEINFO +{ + unsigned long ulWidth; /**< IN: Coded sequence width in pixels */ + unsigned long ulHeight; /**< IN: Coded sequence height in pixels */ + unsigned long ulNumDecodeSurfaces; /**< IN: Maximum number of internal decode surfaces */ + cudaVideoCodec CodecType; /**< IN: cudaVideoCodec_XXX */ + cudaVideoChromaFormat ChromaFormat; /**< IN: cudaVideoChromaFormat_XXX */ + unsigned long ulCreationFlags; /**< IN: Decoder creation flags (cudaVideoCreateFlags_XXX) */ + unsigned long bitDepthMinus8; /**< IN: The value "BitDepth minus 8" */ + unsigned long ulIntraDecodeOnly; /**< IN: Set 1 only if video has all intra frames (default value is 0). This will + optimize video memory for Intra frames only decoding. The support is limited + to specific codecs - H264, HEVC, VP9, the flag will be ignored for codecs which + are not supported. However decoding might fail if the flag is enabled in case + of supported codecs for regular bit streams having P and/or B frames. 
*/ + unsigned long ulMaxWidth; /**< IN: Coded sequence max width in pixels used with reconfigure Decoder */ + unsigned long ulMaxHeight; /**< IN: Coded sequence max height in pixels used with reconfigure Decoder */ + unsigned long Reserved1; /**< Reserved for future use - set to zero */ + /** + * IN: area of the frame that should be displayed + */ + struct { + short left; + short top; + short right; + short bottom; + } display_area; + + cudaVideoSurfaceFormat OutputFormat; /**< IN: cudaVideoSurfaceFormat_XXX */ + cudaVideoDeinterlaceMode DeinterlaceMode; /**< IN: cudaVideoDeinterlaceMode_XXX */ + unsigned long ulTargetWidth; /**< IN: Post-processed output width (Should be aligned to 2) */ + unsigned long ulTargetHeight; /**< IN: Post-processed output height (Should be aligned to 2) */ + unsigned long ulNumOutputSurfaces; /**< IN: Maximum number of output surfaces simultaneously mapped */ + CUvideoctxlock vidLock; /**< IN: If non-NULL, context lock used for synchronizing ownership of + the cuda context. Needed for cudaVideoCreate_PreferCUDA decode */ + /** + * IN: target rectangle in the output frame (for aspect ratio conversion) + * if a null rectangle is specified, {0,0,ulTargetWidth,ulTargetHeight} will be used + */ + struct { + short left; + short top; + short right; + short bottom; + } target_rect; + + unsigned long enableHistogram; /**< IN: enable histogram output, if supported */ + unsigned long Reserved2[4]; /**< Reserved for future use - set to zero */ +} CUVIDDECODECREATEINFO; + +/*********************************************************/ +//! \struct CUVIDH264DPBENTRY +//! H.264 DPB entry +//! This structure is used in CUVIDH264PICPARAMS structure +/*********************************************************/ +typedef struct _CUVIDH264DPBENTRY +{ + int PicIdx; /**< picture index of reference frame */ + int FrameIdx; /**< frame_num(short-term) or LongTermFrameIdx(long-term) */ + int is_long_term; /**< 0=short term reference, 1=long term reference */ + int not_existing; /**< non-existing reference frame (corresponding PicIdx should be set to -1) */ + int used_for_reference; /**< 0=unused, 1=top_field, 2=bottom_field, 3=both_fields */ + int FieldOrderCnt[2]; /**< field order count of top and bottom fields */ +} CUVIDH264DPBENTRY; + +/************************************************************/ +//! \struct CUVIDH264MVCEXT +//! H.264 MVC picture parameters ext +//! This structure is used in CUVIDH264PICPARAMS structure +/************************************************************/ +typedef struct _CUVIDH264MVCEXT +{ + int num_views_minus1; /**< Max number of coded views minus 1 in video : Range - 0 to 1023 */ + int view_id; /**< view identifier */ + unsigned char inter_view_flag; /**< 1 if used for inter-view prediction, 0 if not */ + unsigned char num_inter_view_refs_l0; /**< number of inter-view ref pics in RefPicList0 */ + unsigned char num_inter_view_refs_l1; /**< number of inter-view ref pics in RefPicList1 */ + unsigned char MVCReserved8Bits; /**< Reserved bits */ + int InterViewRefsL0[16]; /**< view id of the i-th view component for inter-view prediction in RefPicList0 */ + int InterViewRefsL1[16]; /**< view id of the i-th view component for inter-view prediction in RefPicList1 */ +} CUVIDH264MVCEXT; + +/*********************************************************/ +//! \struct CUVIDH264SVCEXT +//! H.264 SVC picture parameters ext +//! 
This structure is used in CUVIDH264PICPARAMS structure +/*********************************************************/ +typedef struct _CUVIDH264SVCEXT +{ + unsigned char profile_idc; + unsigned char level_idc; + unsigned char DQId; + unsigned char DQIdMax; + unsigned char disable_inter_layer_deblocking_filter_idc; + unsigned char ref_layer_chroma_phase_y_plus1; + signed char inter_layer_slice_alpha_c0_offset_div2; + signed char inter_layer_slice_beta_offset_div2; + + unsigned short DPBEntryValidFlag; + unsigned char inter_layer_deblocking_filter_control_present_flag; + unsigned char extended_spatial_scalability_idc; + unsigned char adaptive_tcoeff_level_prediction_flag; + unsigned char slice_header_restriction_flag; + unsigned char chroma_phase_x_plus1_flag; + unsigned char chroma_phase_y_plus1; + + unsigned char tcoeff_level_prediction_flag; + unsigned char constrained_intra_resampling_flag; + unsigned char ref_layer_chroma_phase_x_plus1_flag; + unsigned char store_ref_base_pic_flag; + unsigned char Reserved8BitsA; + unsigned char Reserved8BitsB; + + short scaled_ref_layer_left_offset; + short scaled_ref_layer_top_offset; + short scaled_ref_layer_right_offset; + short scaled_ref_layer_bottom_offset; + unsigned short Reserved16Bits; + struct _CUVIDPICPARAMS *pNextLayer; /**< Points to the picparams for the next layer to be decoded. + Linked list ends at the target layer. */ + int bRefBaseLayer; /**< whether to store ref base pic */ +} CUVIDH264SVCEXT; + +/******************************************************/ +//! \struct CUVIDH264PICPARAMS +//! H.264 picture parameters +//! This structure is used in CUVIDPICPARAMS structure +/******************************************************/ +typedef struct _CUVIDH264PICPARAMS +{ + // SPS + int log2_max_frame_num_minus4; + int pic_order_cnt_type; + int log2_max_pic_order_cnt_lsb_minus4; + int delta_pic_order_always_zero_flag; + int frame_mbs_only_flag; + int direct_8x8_inference_flag; + int num_ref_frames; // NOTE: shall meet level 4.1 restrictions + unsigned char residual_colour_transform_flag; + unsigned char bit_depth_luma_minus8; // Must be 0 (only 8-bit supported) + unsigned char bit_depth_chroma_minus8; // Must be 0 (only 8-bit supported) + unsigned char qpprime_y_zero_transform_bypass_flag; + // PPS + int entropy_coding_mode_flag; + int pic_order_present_flag; + int num_ref_idx_l0_active_minus1; + int num_ref_idx_l1_active_minus1; + int weighted_pred_flag; + int weighted_bipred_idc; + int pic_init_qp_minus26; + int deblocking_filter_control_present_flag; + int redundant_pic_cnt_present_flag; + int transform_8x8_mode_flag; + int MbaffFrameFlag; + int constrained_intra_pred_flag; + int chroma_qp_index_offset; + int second_chroma_qp_index_offset; + int ref_pic_flag; + int frame_num; + int CurrFieldOrderCnt[2]; + // DPB + CUVIDH264DPBENTRY dpb[16]; // List of reference frames within the DPB + // Quantization Matrices (raster-order) + unsigned char WeightScale4x4[6][16]; + unsigned char WeightScale8x8[2][64]; + // FMO/ASO + unsigned char fmo_aso_enable; + unsigned char num_slice_groups_minus1; + unsigned char slice_group_map_type; + signed char pic_init_qs_minus26; + unsigned int slice_group_change_rate_minus1; + union + { + unsigned long long slice_group_map_addr; + const unsigned char *pMb2SliceGroupMap; + } fmo; + unsigned int Reserved[12]; + // SVC/MVC + union + { + CUVIDH264MVCEXT mvcext; + CUVIDH264SVCEXT svcext; + }; +} CUVIDH264PICPARAMS; + + +/********************************************************/ +//! \struct CUVIDMPEG2PICPARAMS +//! 
MPEG-2 picture parameters +//! This structure is used in CUVIDPICPARAMS structure +/********************************************************/ +typedef struct _CUVIDMPEG2PICPARAMS +{ + int ForwardRefIdx; // Picture index of forward reference (P/B-frames) + int BackwardRefIdx; // Picture index of backward reference (B-frames) + int picture_coding_type; + int full_pel_forward_vector; + int full_pel_backward_vector; + int f_code[2][2]; + int intra_dc_precision; + int frame_pred_frame_dct; + int concealment_motion_vectors; + int q_scale_type; + int intra_vlc_format; + int alternate_scan; + int top_field_first; + // Quantization matrices (raster order) + unsigned char QuantMatrixIntra[64]; + unsigned char QuantMatrixInter[64]; +} CUVIDMPEG2PICPARAMS; + +// MPEG-4 has VOP types instead of Picture types +#define I_VOP 0 +#define P_VOP 1 +#define B_VOP 2 +#define S_VOP 3 + +/*******************************************************/ +//! \struct CUVIDMPEG4PICPARAMS +//! MPEG-4 picture parameters +//! This structure is used in CUVIDPICPARAMS structure +/*******************************************************/ +typedef struct _CUVIDMPEG4PICPARAMS +{ + int ForwardRefIdx; // Picture index of forward reference (P/B-frames) + int BackwardRefIdx; // Picture index of backward reference (B-frames) + // VOL + int video_object_layer_width; + int video_object_layer_height; + int vop_time_increment_bitcount; + int top_field_first; + int resync_marker_disable; + int quant_type; + int quarter_sample; + int short_video_header; + int divx_flags; + // VOP + int vop_coding_type; + int vop_coded; + int vop_rounding_type; + int alternate_vertical_scan_flag; + int interlaced; + int vop_fcode_forward; + int vop_fcode_backward; + int trd[2]; + int trb[2]; + // Quantization matrices (raster order) + unsigned char QuantMatrixIntra[64]; + unsigned char QuantMatrixInter[64]; + int gmc_enabled; +} CUVIDMPEG4PICPARAMS; + +/********************************************************/ +//! \struct CUVIDVC1PICPARAMS +//! VC1 picture parameters +//! This structure is used in CUVIDPICPARAMS structure +/********************************************************/ +typedef struct _CUVIDVC1PICPARAMS +{ + int ForwardRefIdx; /**< Picture index of forward reference (P/B-frames) */ + int BackwardRefIdx; /**< Picture index of backward reference (B-frames) */ + int FrameWidth; /**< Actual frame width */ + int FrameHeight; /**< Actual frame height */ + // PICTURE + int intra_pic_flag; /**< Set to 1 for I,BI frames */ + int ref_pic_flag; /**< Set to 1 for I,P frames */ + int progressive_fcm; /**< Progressive frame */ + // SEQUENCE + int profile; + int postprocflag; + int pulldown; + int interlace; + int tfcntrflag; + int finterpflag; + int psf; + int multires; + int syncmarker; + int rangered; + int maxbframes; + // ENTRYPOINT + int panscan_flag; + int refdist_flag; + int extended_mv; + int dquant; + int vstransform; + int loopfilter; + int fastuvmc; + int overlap; + int quantizer; + int extended_dmv; + int range_mapy_flag; + int range_mapy; + int range_mapuv_flag; + int range_mapuv; + int rangeredfrm; // range reduction state +} CUVIDVC1PICPARAMS; + +/***********************************************************/ +//! \struct CUVIDJPEGPICPARAMS +//! JPEG picture parameters +//! This structure is used in CUVIDPICPARAMS structure +/***********************************************************/ +typedef struct _CUVIDJPEGPICPARAMS +{ + int Reserved; +} CUVIDJPEGPICPARAMS; + + +/*******************************************************/ +//! 
\struct CUVIDHEVCPICPARAMS +//! HEVC picture parameters +//! This structure is used in CUVIDPICPARAMS structure +/*******************************************************/ +typedef struct _CUVIDHEVCPICPARAMS +{ + // sps + int pic_width_in_luma_samples; + int pic_height_in_luma_samples; + unsigned char log2_min_luma_coding_block_size_minus3; + unsigned char log2_diff_max_min_luma_coding_block_size; + unsigned char log2_min_transform_block_size_minus2; + unsigned char log2_diff_max_min_transform_block_size; + unsigned char pcm_enabled_flag; + unsigned char log2_min_pcm_luma_coding_block_size_minus3; + unsigned char log2_diff_max_min_pcm_luma_coding_block_size; + unsigned char pcm_sample_bit_depth_luma_minus1; + + unsigned char pcm_sample_bit_depth_chroma_minus1; + unsigned char pcm_loop_filter_disabled_flag; + unsigned char strong_intra_smoothing_enabled_flag; + unsigned char max_transform_hierarchy_depth_intra; + unsigned char max_transform_hierarchy_depth_inter; + unsigned char amp_enabled_flag; + unsigned char separate_colour_plane_flag; + unsigned char log2_max_pic_order_cnt_lsb_minus4; + + unsigned char num_short_term_ref_pic_sets; + unsigned char long_term_ref_pics_present_flag; + unsigned char num_long_term_ref_pics_sps; + unsigned char sps_temporal_mvp_enabled_flag; + unsigned char sample_adaptive_offset_enabled_flag; + unsigned char scaling_list_enable_flag; + unsigned char IrapPicFlag; + unsigned char IdrPicFlag; + + unsigned char bit_depth_luma_minus8; + unsigned char bit_depth_chroma_minus8; + //sps/pps extension fields + unsigned char log2_max_transform_skip_block_size_minus2; + unsigned char log2_sao_offset_scale_luma; + unsigned char log2_sao_offset_scale_chroma; + unsigned char high_precision_offsets_enabled_flag; + unsigned char reserved1[10]; + + // pps + unsigned char dependent_slice_segments_enabled_flag; + unsigned char slice_segment_header_extension_present_flag; + unsigned char sign_data_hiding_enabled_flag; + unsigned char cu_qp_delta_enabled_flag; + unsigned char diff_cu_qp_delta_depth; + signed char init_qp_minus26; + signed char pps_cb_qp_offset; + signed char pps_cr_qp_offset; + + unsigned char constrained_intra_pred_flag; + unsigned char weighted_pred_flag; + unsigned char weighted_bipred_flag; + unsigned char transform_skip_enabled_flag; + unsigned char transquant_bypass_enabled_flag; + unsigned char entropy_coding_sync_enabled_flag; + unsigned char log2_parallel_merge_level_minus2; + unsigned char num_extra_slice_header_bits; + + unsigned char loop_filter_across_tiles_enabled_flag; + unsigned char loop_filter_across_slices_enabled_flag; + unsigned char output_flag_present_flag; + unsigned char num_ref_idx_l0_default_active_minus1; + unsigned char num_ref_idx_l1_default_active_minus1; + unsigned char lists_modification_present_flag; + unsigned char cabac_init_present_flag; + unsigned char pps_slice_chroma_qp_offsets_present_flag; + + unsigned char deblocking_filter_override_enabled_flag; + unsigned char pps_deblocking_filter_disabled_flag; + signed char pps_beta_offset_div2; + signed char pps_tc_offset_div2; + unsigned char tiles_enabled_flag; + unsigned char uniform_spacing_flag; + unsigned char num_tile_columns_minus1; + unsigned char num_tile_rows_minus1; + + unsigned short column_width_minus1[21]; + unsigned short row_height_minus1[21]; + + // sps and pps extension HEVC-main 444 + unsigned char sps_range_extension_flag; + unsigned char transform_skip_rotation_enabled_flag; + unsigned char transform_skip_context_enabled_flag; + unsigned char 
implicit_rdpcm_enabled_flag; + + unsigned char explicit_rdpcm_enabled_flag; + unsigned char extended_precision_processing_flag; + unsigned char intra_smoothing_disabled_flag; + unsigned char persistent_rice_adaptation_enabled_flag; + + unsigned char cabac_bypass_alignment_enabled_flag; + unsigned char pps_range_extension_flag; + unsigned char cross_component_prediction_enabled_flag; + unsigned char chroma_qp_offset_list_enabled_flag; + + unsigned char diff_cu_chroma_qp_offset_depth; + unsigned char chroma_qp_offset_list_len_minus1; + signed char cb_qp_offset_list[6]; + + signed char cr_qp_offset_list[6]; + unsigned char reserved2[2]; + + unsigned int reserved3[8]; + + // RefPicSets + int NumBitsForShortTermRPSInSlice; + int NumDeltaPocsOfRefRpsIdx; + int NumPocTotalCurr; + int NumPocStCurrBefore; + int NumPocStCurrAfter; + int NumPocLtCurr; + int CurrPicOrderCntVal; + int RefPicIdx[16]; // [refpic] Indices of valid reference pictures (-1 if unused for reference) + int PicOrderCntVal[16]; // [refpic] + unsigned char IsLongTerm[16]; // [refpic] 0=not a long-term reference, 1=long-term reference + unsigned char RefPicSetStCurrBefore[8]; // [0..NumPocStCurrBefore-1] -> refpic (0..15) + unsigned char RefPicSetStCurrAfter[8]; // [0..NumPocStCurrAfter-1] -> refpic (0..15) + unsigned char RefPicSetLtCurr[8]; // [0..NumPocLtCurr-1] -> refpic (0..15) + unsigned char RefPicSetInterLayer0[8]; + unsigned char RefPicSetInterLayer1[8]; + unsigned int reserved4[12]; + + // scaling lists (diag order) + unsigned char ScalingList4x4[6][16]; // [matrixId][i] + unsigned char ScalingList8x8[6][64]; // [matrixId][i] + unsigned char ScalingList16x16[6][64]; // [matrixId][i] + unsigned char ScalingList32x32[2][64]; // [matrixId][i] + unsigned char ScalingListDCCoeff16x16[6]; // [matrixId] + unsigned char ScalingListDCCoeff32x32[2]; // [matrixId] +} CUVIDHEVCPICPARAMS; + + +/***********************************************************/ +//! \struct CUVIDVP8PICPARAMS +//! VP8 picture parameters +//! This structure is used in CUVIDPICPARAMS structure +/***********************************************************/ +typedef struct _CUVIDVP8PICPARAMS +{ + int width; + int height; + unsigned int first_partition_size; + //Frame Indexes + unsigned char LastRefIdx; + unsigned char GoldenRefIdx; + unsigned char AltRefIdx; + union { + struct { + unsigned char frame_type : 1; /**< 0 = KEYFRAME, 1 = INTERFRAME */ + unsigned char version : 3; + unsigned char show_frame : 1; + unsigned char update_mb_segmentation_data : 1; /**< Must be 0 if segmentation is not enabled */ + unsigned char Reserved2Bits : 2; + }vp8_frame_tag; + unsigned char wFrameTagFlags; + }; + unsigned char Reserved1[4]; + unsigned int Reserved2[3]; +} CUVIDVP8PICPARAMS; + +/***********************************************************/ +//! \struct CUVIDVP9PICPARAMS +//! VP9 picture parameters +//! 
This structure is used in CUVIDPICPARAMS structure +/***********************************************************/ +typedef struct _CUVIDVP9PICPARAMS +{ + unsigned int width; + unsigned int height; + + //Frame Indices + unsigned char LastRefIdx; + unsigned char GoldenRefIdx; + unsigned char AltRefIdx; + unsigned char colorSpace; + + unsigned short profile : 3; + unsigned short frameContextIdx : 2; + unsigned short frameType : 1; + unsigned short showFrame : 1; + unsigned short errorResilient : 1; + unsigned short frameParallelDecoding : 1; + unsigned short subSamplingX : 1; + unsigned short subSamplingY : 1; + unsigned short intraOnly : 1; + unsigned short allow_high_precision_mv : 1; + unsigned short refreshEntropyProbs : 1; + unsigned short reserved2Bits : 2; + + unsigned short reserved16Bits; + + unsigned char refFrameSignBias[4]; + + unsigned char bitDepthMinus8Luma; + unsigned char bitDepthMinus8Chroma; + unsigned char loopFilterLevel; + unsigned char loopFilterSharpness; + + unsigned char modeRefLfEnabled; + unsigned char log2_tile_columns; + unsigned char log2_tile_rows; + + unsigned char segmentEnabled : 1; + unsigned char segmentMapUpdate : 1; + unsigned char segmentMapTemporalUpdate : 1; + unsigned char segmentFeatureMode : 1; + unsigned char reserved4Bits : 4; + + + unsigned char segmentFeatureEnable[8][4]; + short segmentFeatureData[8][4]; + unsigned char mb_segment_tree_probs[7]; + unsigned char segment_pred_probs[3]; + unsigned char reservedSegment16Bits[2]; + + int qpYAc; + int qpYDc; + int qpChDc; + int qpChAc; + + unsigned int activeRefIdx[3]; + unsigned int resetFrameContext; + unsigned int mcomp_filter_type; + unsigned int mbRefLfDelta[4]; + unsigned int mbModeLfDelta[2]; + unsigned int frameTagSize; + unsigned int offsetToDctParts; + unsigned int reserved128Bits[4]; + +} CUVIDVP9PICPARAMS; + +/***********************************************************/ +//! \struct CUVIDAV1PICPARAMS +//! AV1 picture parameters +//! This structure is used in CUVIDPICPARAMS structure +/***********************************************************/ +typedef struct _CUVIDAV1PICPARAMS +{ + unsigned int width; // coded width, if superres enabled then it is upscaled width + unsigned int height; // coded height + unsigned int frame_offset; // defined as order_hint in AV1 specification + int decodePicIdx; // decoded output pic index, if film grain enabled, it will keep decoded (without film grain) output + // It can be used as reference frame for future frames + + // sequence header + unsigned int profile : 3; // 0 = profile0, 1 = profile1, 2 = profile2 + unsigned int use_128x128_superblock : 1; // superblock size 0:64x64, 1: 128x128 + unsigned int subsampling_x : 1; // (subsampling_x, _y) 1,1 = 420, 1,0 = 422, 0,0 = 444 + unsigned int subsampling_y : 1; + unsigned int mono_chrome : 1; // for monochrome content, mono_chrome = 1 and (subsampling_x, _y) should be 1,1 + unsigned int bit_depth_minus8 : 4; // bit depth minus 8 + unsigned int enable_filter_intra : 1; // tool enable in seq level, 0 : disable 1: frame header control + unsigned int enable_intra_edge_filter : 1; // intra edge filtering process, 0 : disable 1: enabled + unsigned int enable_interintra_compound : 1; // interintra, 0 : not present 1: present + unsigned int enable_masked_compound : 1; // 1: mode info for inter blocks may contain the syntax element compound_type. 
+ // 0: syntax element compound_type will not be present + unsigned int enable_dual_filter : 1; // vertical and horiz filter selection, 1: enable and 0: disable + unsigned int enable_order_hint : 1; // order hint, and related tools, 1: enable and 0: disable + unsigned int order_hint_bits_minus1 : 3; // is used to compute OrderHintBits + unsigned int enable_jnt_comp : 1; // joint compound modes, 1: enable and 0: disable + unsigned int enable_superres : 1; // superres in seq level, 0 : disable 1: frame level control + unsigned int enable_cdef : 1; // cdef filtering in seq level, 0 : disable 1: frame level control + unsigned int enable_restoration : 1; // loop restoration filtering in seq level, 0 : disable 1: frame level control + unsigned int enable_fgs : 1; // defined as film_grain_params_present in AV1 specification + unsigned int reserved0_7bits : 7; // reserved bits; must be set to 0 + + // frame header + unsigned int frame_type : 2 ; // 0:Key frame, 1:Inter frame, 2:intra only, 3:s-frame + unsigned int show_frame : 1 ; // show_frame = 1 implies that frame should be immediately output once decoded + unsigned int disable_cdf_update : 1; // CDF update during symbol decoding, 1: disabled, 0: enabled + unsigned int allow_screen_content_tools : 1; // 1: intra blocks may use palette encoding, 0: palette encoding is never used + unsigned int force_integer_mv : 1; // 1: motion vectors will always be integers, 0: can contain fractional bits + unsigned int coded_denom : 3; // coded_denom of the superres scale as specified in AV1 specification + unsigned int allow_intrabc : 1; // 1: intra block copy may be used, 0: intra block copy is not allowed + unsigned int allow_high_precision_mv : 1; // 1/8 precision mv enable + unsigned int interp_filter : 3; // interpolation filter. Refer to section 6.8.9 of the AV1 specification Version 1.0.0 with Errata 1 + unsigned int switchable_motion_mode : 1; // defined as is_motion_mode_switchable in AV1 specification + unsigned int use_ref_frame_mvs : 1; // 1: current frame can use the previous frame mv information, 0: will not use. 
+ unsigned int disable_frame_end_update_cdf : 1; // 1: indicates that the end of frame CDF update is disabled + unsigned int delta_q_present : 1; // quantizer index delta values are present in the block level + unsigned int delta_q_res : 2; // left shift which should be applied to decoded quantizer index delta values + unsigned int using_qmatrix : 1; // 1: quantizer matrix will be used to compute quantizers + unsigned int coded_lossless : 1; // 1: all segments use lossless coding + unsigned int use_superres : 1; // 1: superres enabled for frame + unsigned int tx_mode : 2; // 0: ONLY4x4,1:LARGEST,2:SELECT + unsigned int reference_mode : 1; // 0: SINGLE, 1: SELECT + unsigned int allow_warped_motion : 1; // 1: allow_warped_motion may be present, 0: allow_warped_motion will not be present + unsigned int reduced_tx_set : 1; // 1: frame is restricted to subset of the full set of transform types, 0: no such restriction + unsigned int skip_mode : 1; // 1: most of the mode info is skipped, 0: mode info is not skipped + unsigned int reserved1_3bits : 3; // reserved bits; must be set to 0 + + // tiling info + unsigned int num_tile_cols : 8; // number of tiles across the frame., max is 64 + unsigned int num_tile_rows : 8; // number of tiles down the frame., max is 64 + unsigned int context_update_tile_id : 16; // specifies which tile to use for the CDF update + unsigned short tile_widths[64]; // Width of each column in superblocks + unsigned short tile_heights[64]; // height of each row in superblocks + + // CDEF - refer to section 6.10.14 of the AV1 specification Version 1.0.0 with Errata 1 + unsigned char cdef_damping_minus_3 : 2; // controls the amount of damping in the deringing filter + unsigned char cdef_bits : 2; // the number of bits needed to specify which CDEF filter to apply + unsigned char reserved2_4bits : 4; // reserved bits; must be set to 0 + unsigned char cdef_y_strength[8]; // 0-3 bits: y_pri_strength, 4-7 bits y_sec_strength + unsigned char cdef_uv_strength[8]; // 0-3 bits: uv_pri_strength, 4-7 bits uv_sec_strength + + // SkipModeFrames + unsigned char SkipModeFrame0 : 4; // specifies the frames to use for compound prediction when skip_mode is equal to 1. + unsigned char SkipModeFrame1 : 4; + + // qp information - refer to section 6.8.11 of the AV1 specification Version 1.0.0 with Errata 1 + unsigned char base_qindex; // indicates the base frame qindex. Defined as base_q_idx in AV1 specification + char qp_y_dc_delta_q; // indicates the Y DC quantizer relative to base_q_idx. Defined as DeltaQYDc in AV1 specification + char qp_u_dc_delta_q; // indicates the U DC quantizer relative to base_q_idx. Defined as DeltaQUDc in AV1 specification + char qp_v_dc_delta_q; // indicates the V DC quantizer relative to base_q_idx. Defined as DeltaQVDc in AV1 specification + char qp_u_ac_delta_q; // indicates the U AC quantizer relative to base_q_idx. Defined as DeltaQUAc in AV1 specification + char qp_v_ac_delta_q; // indicates the V AC quantizer relative to base_q_idx. 
Defined as DeltaQVAc in AV1 specification + unsigned char qm_y; // specifies the level in the quantizer matrix that should be used for luma plane decoding + unsigned char qm_u; // specifies the level in the quantizer matrix that should be used for chroma U plane decoding + unsigned char qm_v; // specifies the level in the quantizer matrix that should be used for chroma V plane decoding + + // segmentation - refer to section 6.8.13 of the AV1 specification Version 1.0.0 with Errata 1 + unsigned char segmentation_enabled : 1; // 1 indicates that this frame makes use of the segmentation tool + unsigned char segmentation_update_map : 1; // 1 indicates that the segmentation map are updated during the decoding of this frame + unsigned char segmentation_update_data : 1; // 1 indicates that new parameters are about to be specified for each segment + unsigned char segmentation_temporal_update : 1; // 1 indicates that the updates to the segmentation map are coded relative to the existing segmentation map + unsigned char reserved3_4bits : 4; // reserved bits; must be set to 0 + short segmentation_feature_data[8][8]; // specifies the feature data for a segment feature + unsigned char segmentation_feature_mask[8]; // indicates that the corresponding feature is unused or feature value is coded + + // loopfilter - refer to section 6.8.10 of the AV1 specification Version 1.0.0 with Errata 1 + unsigned char loop_filter_level[2]; // contains loop filter strength values + unsigned char loop_filter_level_u; // loop filter strength value of U plane + unsigned char loop_filter_level_v; // loop filter strength value of V plane + unsigned char loop_filter_sharpness; // indicates the sharpness level + char loop_filter_ref_deltas[8]; // contains the adjustment needed for the filter level based on the chosen reference frame + char loop_filter_mode_deltas[2]; // contains the adjustment needed for the filter level based on the chosen mode + unsigned char loop_filter_delta_enabled : 1; // indicates that the filter level depends on the mode and reference frame used to predict a block + unsigned char loop_filter_delta_update : 1; // indicates that additional syntax elements are present that specify which mode and + // reference frame deltas are to be updated + unsigned char delta_lf_present : 1; // specifies whether loop filter delta values are present in the block level + unsigned char delta_lf_res : 2; // specifies the left shift to apply to the decoded loop filter values + unsigned char delta_lf_multi : 1; // separate loop filter deltas for Hy,Vy,U,V edges + unsigned char reserved4_2bits : 2; // reserved bits; must be set to 0 + + // restoration - refer to section 6.10.15 of the AV1 specification Version 1.0.0 with Errata 1 + unsigned char lr_unit_size[3]; // specifies the size of loop restoration units: 0: 32, 1: 64, 2: 128, 3: 256 + unsigned char lr_type[3] ; // used to compute FrameRestorationType + + // reference frames + unsigned char primary_ref_frame; // specifies which reference frame contains the CDF values and other state that should be + // loaded at the start of the frame + unsigned char ref_frame_map[8]; // frames in dpb that can be used as reference for current or future frames + + unsigned char temporal_layer_id : 4; // temporal layer id + unsigned char spatial_layer_id : 4; // spatial layer id + + unsigned char reserved5_32bits[4]; // reserved bits; must be set to 0 + + // ref frame list + struct + { + unsigned int width; + unsigned int height; + unsigned char index; + unsigned char reserved24Bits[3]; 
// reserved bits; must be set to 0 + } ref_frame[7]; // frames used as reference frame for current frame. + + // global motion + struct { + unsigned char invalid : 1; + unsigned char wmtype : 2; // defined as GmType in AV1 specification + unsigned char reserved5Bits : 5; // reserved bits; must be set to 0 + char reserved24Bits[3]; // reserved bits; must be set to 0 + int wmmat[6]; // defined as gm_params[] in AV1 specification + } global_motion[7]; // global motion params for reference frames + + // film grain params - refer to section 6.8.20 of the AV1 specification Version 1.0.0 with Errata 1 + unsigned short apply_grain : 1; + unsigned short overlap_flag : 1; + unsigned short scaling_shift_minus8 : 2; + unsigned short chroma_scaling_from_luma : 1; + unsigned short ar_coeff_lag : 2; + unsigned short ar_coeff_shift_minus6 : 2; + unsigned short grain_scale_shift : 2; + unsigned short clip_to_restricted_range : 1; + unsigned short reserved6_4bits : 4; // reserved bits; must be set to 0 + unsigned char num_y_points; + unsigned char scaling_points_y[14][2]; + unsigned char num_cb_points; + unsigned char scaling_points_cb[10][2]; + unsigned char num_cr_points; + unsigned char scaling_points_cr[10][2]; + unsigned char reserved7_8bits; // reserved bits; must be set to 0 + unsigned short random_seed; + short ar_coeffs_y[24]; + short ar_coeffs_cb[25]; + short ar_coeffs_cr[25]; + unsigned char cb_mult; + unsigned char cb_luma_mult; + short cb_offset; + unsigned char cr_mult; + unsigned char cr_luma_mult; + short cr_offset; + + int reserved[7]; // reserved bits; must be set to 0 +} CUVIDAV1PICPARAMS; + +/******************************************************************************************/ +//! \struct CUVIDPICPARAMS +//! Picture parameters for decoding +//! This structure is used in cuvidDecodePicture API +//! IN for cuvidDecodePicture +/******************************************************************************************/ +typedef struct _CUVIDPICPARAMS +{ + int PicWidthInMbs; /**< IN: Coded frame size in macroblocks */ + int FrameHeightInMbs; /**< IN: Coded frame height in macroblocks */ + int CurrPicIdx; /**< IN: Output index of the current picture */ + int field_pic_flag; /**< IN: 0=frame picture, 1=field picture */ + int bottom_field_flag; /**< IN: 0=top field, 1=bottom field (ignored if field_pic_flag=0) */ + int second_field; /**< IN: Second field of a complementary field pair */ + // Bitstream data + unsigned int nBitstreamDataLen; /**< IN: Number of bytes in bitstream data buffer */ + const unsigned char *pBitstreamData; /**< IN: Ptr to bitstream data for this picture (slice-layer) */ + unsigned int nNumSlices; /**< IN: Number of slices in this picture */ + const unsigned int *pSliceDataOffsets; /**< IN: nNumSlices entries, contains offset of each slice within + the bitstream data buffer */ + int ref_pic_flag; /**< IN: This picture is a reference picture */ + int intra_pic_flag; /**< IN: This picture is entirely intra coded */ + unsigned int Reserved[30]; /**< Reserved for future use */ + // IN: Codec-specific data + union { + CUVIDMPEG2PICPARAMS mpeg2; /**< Also used for MPEG-1 */ + CUVIDH264PICPARAMS h264; + CUVIDVC1PICPARAMS vc1; + CUVIDMPEG4PICPARAMS mpeg4; + CUVIDJPEGPICPARAMS jpeg; + CUVIDHEVCPICPARAMS hevc; + CUVIDVP8PICPARAMS vp8; + CUVIDVP9PICPARAMS vp9; + CUVIDAV1PICPARAMS av1; + unsigned int CodecReserved[1024]; + } CodecSpecific; +} CUVIDPICPARAMS; + + +/******************************************************/ +//! \struct CUVIDPROCPARAMS +//! 
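+//! A minimal fill sketch (illustrative only; 'dispInfo' and 'stream' are
+//! placeholders for values a real application gets from its parser callback
+//! and CUDA setup):
+//!
+//!   CUVIDPROCPARAMS vpp = { 0 };
+//!   vpp.progressive_frame = dispInfo.progressive_frame;
+//!   vpp.top_field_first   = dispInfo.top_field_first;
+//!   vpp.second_field      = 0;
+//!   vpp.output_stream     = stream;  // CUstream the mapped frame is consumed on
+//! 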
Picture parameters for postprocessing +//! This structure is used in cuvidMapVideoFrame API +/******************************************************/ +typedef struct _CUVIDPROCPARAMS +{ + int progressive_frame; /**< IN: Input is progressive (deinterlace_mode will be ignored) */ + int second_field; /**< IN: Output the second field (ignored if deinterlace mode is Weave) */ + int top_field_first; /**< IN: Input frame is top field first (1st field is top, 2nd field is bottom) */ + int unpaired_field; /**< IN: Input only contains one field (2nd field is invalid) */ + // The fields below are used for raw YUV input + unsigned int reserved_flags; /**< Reserved for future use (set to zero) */ + unsigned int reserved_zero; /**< Reserved (set to zero) */ + unsigned long long raw_input_dptr; /**< IN: Input CUdeviceptr for raw YUV extensions */ + unsigned int raw_input_pitch; /**< IN: pitch in bytes of raw YUV input (should be aligned appropriately) */ + unsigned int raw_input_format; /**< IN: Input YUV format (cudaVideoCodec_enum) */ + unsigned long long raw_output_dptr; /**< IN: Output CUdeviceptr for raw YUV extensions */ + unsigned int raw_output_pitch; /**< IN: pitch in bytes of raw YUV output (should be aligned appropriately) */ + unsigned int Reserved1; /**< Reserved for future use (set to zero) */ + CUstream output_stream; /**< IN: stream object used by cuvidMapVideoFrame */ + unsigned int Reserved[46]; /**< Reserved for future use (set to zero) */ + unsigned long long *histogram_dptr; /**< OUT: Output CUdeviceptr for histogram extensions */ + void *Reserved2[1]; /**< Reserved for future use (set to zero) */ +} CUVIDPROCPARAMS; + +/*********************************************************************************************************/ +//! \struct CUVIDGETDECODESTATUS +//! Struct for reporting decode status. +//! This structure is used in cuvidGetDecodeStatus API. +/*********************************************************************************************************/ +typedef struct _CUVIDGETDECODESTATUS +{ + cuvidDecodeStatus decodeStatus; + unsigned int reserved[31]; + void *pReserved[8]; +} CUVIDGETDECODESTATUS; + +/****************************************************/ +//! \struct CUVIDRECONFIGUREDECODERINFO +//! Struct for decoder reset +//! This structure is used in cuvidReconfigureDecoder() API +/****************************************************/ +typedef struct _CUVIDRECONFIGUREDECODERINFO +{ + unsigned int ulWidth; /**< IN: Coded sequence width in pixels, MUST be < = ulMaxWidth defined at CUVIDDECODECREATEINFO */ + unsigned int ulHeight; /**< IN: Coded sequence height in pixels, MUST be < = ulMaxHeight defined at CUVIDDECODECREATEINFO */ + unsigned int ulTargetWidth; /**< IN: Post processed output width */ + unsigned int ulTargetHeight; /**< IN: Post Processed output height */ + unsigned int ulNumDecodeSurfaces; /**< IN: Maximum number of internal decode surfaces */ + unsigned int reserved1[12]; /**< Reserved for future use. Set to Zero */ + /** + * IN: Area of frame to be displayed. Use-case : Source Cropping + */ + struct { + short left; + short top; + short right; + short bottom; + } display_area; + /** + * IN: Target Rectangle in the OutputFrame. Use-case : Aspect ratio Conversion + */ + struct { + short left; + short top; + short right; + short bottom; + } target_rect; + unsigned int reserved2[11]; /**< Reserved for future use. 
Set to Zero */ +} CUVIDRECONFIGUREDECODERINFO; + + +/***********************************************************************************************************/ +//! VIDEO_DECODER +//! +//! In order to minimize decode latencies, there should be always at least 2 pictures in the decode +//! queue at any time, in order to make sure that all decode engines are always busy. +//! +//! Overall data flow: +//! - cuvidGetDecoderCaps(...) +//! - cuvidCreateDecoder(...) +//! - For each picture: +//! + cuvidDecodePicture(N) +//! + cuvidMapVideoFrame(N-4) +//! + do some processing in cuda +//! + cuvidUnmapVideoFrame(N-4) +//! + cuvidDecodePicture(N+1) +//! + cuvidMapVideoFrame(N-3) +//! + ... +//! - cuvidDestroyDecoder(...) +//! +//! NOTE: +//! - When the cuda context is created from a D3D device, the D3D device must also be created +//! with the D3DCREATE_MULTITHREADED flag. +//! - There is a limit to how many pictures can be mapped simultaneously (ulNumOutputSurfaces) +//! - cuvidDecodePicture may block the calling thread if there are too many pictures pending +//! in the decode queue +/***********************************************************************************************************/ + + +/**********************************************************************************************************************/ +//! \fn CUresult CUDAAPI cuvidGetDecoderCaps(CUVIDDECODECAPS *pdc) +//! Queries decode capabilities of NVDEC-HW based on CodecType, ChromaFormat and BitDepthMinus8 parameters. +//! 1. Application fills IN parameters CodecType, ChromaFormat and BitDepthMinus8 of CUVIDDECODECAPS structure +//! 2. On calling cuvidGetDecoderCaps, driver fills OUT parameters if the IN parameters are supported +//! If IN parameters passed to the driver are not supported by NVDEC-HW, then all OUT params are set to 0. +//! E.g. on Geforce GTX 960: +//! App fills - eCodecType = cudaVideoCodec_H264; eChromaFormat = cudaVideoChromaFormat_420; nBitDepthMinus8 = 0; +//! Given IN parameters are supported, hence driver fills: bIsSupported = 1; nMinWidth = 48; nMinHeight = 16; +//! nMaxWidth = 4096; nMaxHeight = 4096; nMaxMBCount = 65536; +//! CodedWidth*CodedHeight/256 must be less than or equal to nMaxMBCount +/**********************************************************************************************************************/ +extern CUresult CUDAAPI cuvidGetDecoderCaps(CUVIDDECODECAPS *pdc); + +/*****************************************************************************************************/ +//! \fn CUresult CUDAAPI cuvidCreateDecoder(CUvideodecoder *phDecoder, CUVIDDECODECREATEINFO *pdci) +//! Create the decoder object based on pdci. A handle to the created decoder is returned +/*****************************************************************************************************/ +extern CUresult CUDAAPI cuvidCreateDecoder(CUvideodecoder *phDecoder, CUVIDDECODECREATEINFO *pdci); + +/*****************************************************************************************************/ +//! \fn CUresult CUDAAPI cuvidDestroyDecoder(CUvideodecoder hDecoder) +//! Destroy the decoder object +/*****************************************************************************************************/ +extern CUresult CUDAAPI cuvidDestroyDecoder(CUvideodecoder hDecoder); + +/*****************************************************************************************************/ +//! \fn CUresult CUDAAPI cuvidDecodePicture(CUvideodecoder hDecoder, CUVIDPICPARAMS *pPicParams) +//! 
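+//! A decode-loop sketch following the VIDEO_DECODER data flow above (illustrative
+//! only; 'picParams', 'procParams' and the picture indices are placeholders that a
+//! real application obtains from the parser callbacks; assumes a 64-bit build where
+//! cuvidMapVideoFrame maps to the 64-bit variant):
+//!
+//!   cuvidDecodePicture(hDecoder, &picParams);          // queue picture N on NVDEC
+//!   unsigned long long devPtr = 0; unsigned int pitch = 0;
+//!   cuvidMapVideoFrame(hDecoder, nPicIdx, &devPtr, &pitch, &procParams); // map N-4
+//!   // ... process the NV12/P016 surface with CUDA ...
+//!   cuvidUnmapVideoFrame(hDecoder, devPtr);
+//! 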
Decode a single picture (field or frame) +//! Kicks off HW decoding +/*****************************************************************************************************/ +extern CUresult CUDAAPI cuvidDecodePicture(CUvideodecoder hDecoder, CUVIDPICPARAMS *pPicParams); + +/************************************************************************************************************/ +//! \fn CUresult CUDAAPI cuvidGetDecodeStatus(CUvideodecoder hDecoder, int nPicIdx); +//! Get the decode status for frame corresponding to nPicIdx +//! API is supported for Maxwell and above generation GPUs. +//! API is currently supported for HEVC, H264 and JPEG codecs. +//! API returns CUDA_ERROR_NOT_SUPPORTED error code for unsupported GPU or codec. +/************************************************************************************************************/ +extern CUresult CUDAAPI cuvidGetDecodeStatus(CUvideodecoder hDecoder, int nPicIdx, CUVIDGETDECODESTATUS* pDecodeStatus); + +/*********************************************************************************************************/ +//! \fn CUresult CUDAAPI cuvidReconfigureDecoder(CUvideodecoder hDecoder, CUVIDRECONFIGUREDECODERINFO *pDecReconfigParams) +//! Used to reuse single decoder for multiple clips. Currently supports resolution change, resize params, display area +//! params, target area params change for same codec. Must be called during CUVIDPARSERPARAMS::pfnSequenceCallback +/*********************************************************************************************************/ +extern CUresult CUDAAPI cuvidReconfigureDecoder(CUvideodecoder hDecoder, CUVIDRECONFIGUREDECODERINFO *pDecReconfigParams); + + +#if !defined(__CUVID_DEVPTR64) || defined(__CUVID_INTERNAL) +/************************************************************************************************************************/ +//! \fn CUresult CUDAAPI cuvidMapVideoFrame(CUvideodecoder hDecoder, int nPicIdx, unsigned int *pDevPtr, +//! unsigned int *pPitch, CUVIDPROCPARAMS *pVPP); +//! Post-process and map video frame corresponding to nPicIdx for use in cuda. Returns cuda device pointer and associated +//! pitch of the video frame +/************************************************************************************************************************/ +extern CUresult CUDAAPI cuvidMapVideoFrame(CUvideodecoder hDecoder, int nPicIdx, + unsigned int *pDevPtr, unsigned int *pPitch, + CUVIDPROCPARAMS *pVPP); + +/*****************************************************************************************************/ +//! \fn CUresult CUDAAPI cuvidUnmapVideoFrame(CUvideodecoder hDecoder, unsigned int DevPtr) +//! Unmap a previously mapped video frame +/*****************************************************************************************************/ +extern CUresult CUDAAPI cuvidUnmapVideoFrame(CUvideodecoder hDecoder, unsigned int DevPtr); +#endif + +#if defined(_WIN64) || defined(__LP64__) || defined(__x86_64) || defined(AMD64) || defined(_M_AMD64) +/****************************************************************************************************************************/ +//! \fn CUresult CUDAAPI cuvidMapVideoFrame64(CUvideodecoder hDecoder, int nPicIdx, unsigned long long *pDevPtr, +//! unsigned int * pPitch, CUVIDPROCPARAMS *pVPP); +//! Post-process and map video frame corresponding to nPicIdx for use in cuda. Returns cuda device pointer and associated +//! 
pitch of the video frame +/****************************************************************************************************************************/ +extern CUresult CUDAAPI cuvidMapVideoFrame64(CUvideodecoder hDecoder, int nPicIdx, unsigned long long *pDevPtr, + unsigned int *pPitch, CUVIDPROCPARAMS *pVPP); + +/**************************************************************************************************/ +//! \fn CUresult CUDAAPI cuvidUnmapVideoFrame64(CUvideodecoder hDecoder, unsigned long long DevPtr); +//! Unmap a previously mapped video frame +/**************************************************************************************************/ +extern CUresult CUDAAPI cuvidUnmapVideoFrame64(CUvideodecoder hDecoder, unsigned long long DevPtr); + +#if defined(__CUVID_DEVPTR64) && !defined(__CUVID_INTERNAL) +#define cuvidMapVideoFrame cuvidMapVideoFrame64 +#define cuvidUnmapVideoFrame cuvidUnmapVideoFrame64 +#endif +#endif + + + +/********************************************************************************************************************/ +//! +//! Context-locking: to facilitate multi-threaded implementations, the following 4 functions +//! provide a simple mutex-style host synchronization. If a non-NULL context is specified +//! in CUVIDDECODECREATEINFO, the codec library will acquire the mutex associated with the given +//! context before making any cuda calls. +//! A multi-threaded application could create a lock associated with a context handle so that +//! multiple threads can safely share the same cuda context: +//! - use cuCtxPopCurrent immediately after context creation in order to create a 'floating' context +//! that can be passed to cuvidCtxLockCreate. +//! - When using a floating context, all cuda calls should only be made within a cuvidCtxLock/cuvidCtxUnlock section. +//! +//! NOTE: This is a safer alternative to cuCtxPushCurrent and cuCtxPopCurrent, and is not related to video +//! decoder in any way (implemented as a critical section associated with cuCtx{Push|Pop}Current calls). +/********************************************************************************************************************/ + +/********************************************************************************************************************/ +//! \fn CUresult CUDAAPI cuvidCtxLockCreate(CUvideoctxlock *pLock, CUcontext ctx) +//! This API is used to create CtxLock object +/********************************************************************************************************************/ +extern CUresult CUDAAPI cuvidCtxLockCreate(CUvideoctxlock *pLock, CUcontext ctx); + +/********************************************************************************************************************/ +//! \fn CUresult CUDAAPI cuvidCtxLockDestroy(CUvideoctxlock lck) +//! This API is used to free CtxLock object +/********************************************************************************************************************/ +extern CUresult CUDAAPI cuvidCtxLockDestroy(CUvideoctxlock lck); + +/********************************************************************************************************************/ +//! \fn CUresult CUDAAPI cuvidCtxLock(CUvideoctxlock lck, unsigned int reserved_flags) +//! 
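+//! A sketch of the floating-context pattern described above (illustrative only;
+//! 'cuContext' is a placeholder for a context created with cuCtxCreate):
+//!
+//!   CUvideoctxlock lock = NULL;
+//!   cuCtxPopCurrent(NULL);              // detach right after creation -> 'floating' context
+//!   cuvidCtxLockCreate(&lock, cuContext);
+//!   cuvidCtxLock(lock, 0);              // acquire before any CUDA call on this context
+//!   // ... CUDA calls ...
+//!   cuvidCtxUnlock(lock, 0);
+//! 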
This API is used to acquire ctxlock +/********************************************************************************************************************/ +extern CUresult CUDAAPI cuvidCtxLock(CUvideoctxlock lck, unsigned int reserved_flags); + +/********************************************************************************************************************/ +//! \fn CUresult CUDAAPI cuvidCtxUnlock(CUvideoctxlock lck, unsigned int reserved_flags) +//! This API is used to release ctxlock +/********************************************************************************************************************/ +extern CUresult CUDAAPI cuvidCtxUnlock(CUvideoctxlock lck, unsigned int reserved_flags); + +/**********************************************************************************************/ + + +#if defined(__cplusplus) +} +// Auto-lock helper for C++ applications +class CCtxAutoLock +{ +private: + CUvideoctxlock m_ctx; +public: + CCtxAutoLock(CUvideoctxlock ctx):m_ctx(ctx) { cuvidCtxLock(m_ctx,0); } + ~CCtxAutoLock() { cuvidCtxUnlock(m_ctx,0); } +}; +#endif /* __cplusplus */ + +#endif // __CUDA_VIDEO_H__ diff --git a/extern/nvidia/Video_Codec_SDK_11.1.5/Interface/nvEncodeAPI.h b/extern/nvidia/Video_Codec_SDK_11.1.5/Interface/nvEncodeAPI.h new file mode 100644 index 00000000000..2d9686f58b1 --- /dev/null +++ b/extern/nvidia/Video_Codec_SDK_11.1.5/Interface/nvEncodeAPI.h @@ -0,0 +1,3907 @@ +/* + * This copyright notice applies to this header file only: + * + * Copyright (c) 2010-2021 NVIDIA Corporation + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the software, and to permit persons to whom the + * software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/** + * \file nvEncodeAPI.h + * NVIDIA GPUs - beginning with the Kepler generation - contain a hardware-based encoder + * (referred to as NVENC) which provides fully-accelerated hardware-based video encoding. + * NvEncodeAPI provides the interface for NVIDIA video encoder (NVENC). + * \date 2011-2020 + * This file contains the interface constants, structure definitions and function prototypes. 
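+ *
+ * An illustrative (non-normative) sketch of the bootstrap sequence; all entry
+ * points are resolved through the NV_ENCODE_API_FUNCTION_LIST table:
+ * \code
+ * NV_ENCODE_API_FUNCTION_LIST fn = { NV_ENCODE_API_FUNCTION_LIST_VER };
+ * NvEncodeAPICreateInstance(&fn);           // populate the function table
+ * // then fn.nvEncOpenEncodeSessionEx(), fn.nvEncInitializeEncoder(), and,
+ * // per frame, fn.nvEncEncodePicture() / fn.nvEncLockBitstream().
+ * \endcode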
+ */ + +#ifndef _NV_ENCODEAPI_H_ +#define _NV_ENCODEAPI_H_ + +#include + +#ifdef _WIN32 +#include +#endif + +#ifdef _MSC_VER +#ifndef _STDINT +typedef __int32 int32_t; +typedef unsigned __int32 uint32_t; +typedef __int64 int64_t; +typedef unsigned __int64 uint64_t; +typedef signed char int8_t; +typedef unsigned char uint8_t; +typedef short int16_t; +typedef unsigned short uint16_t; +#endif +#else +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \addtogroup ENCODER_STRUCTURE NvEncodeAPI Data structures + * @{ + */ + +#ifdef _WIN32 +#define NVENCAPI __stdcall +typedef RECT NVENC_RECT; +#else +#define NVENCAPI +// ========================================================================================= +#if !defined(GUID) && !defined(GUID_DEFINED) +/*! + * \struct GUID + * Abstracts the GUID structure for non-windows platforms. + */ +// ========================================================================================= +typedef struct +{ + uint32_t Data1; /**< [in]: Specifies the first 8 hexadecimal digits of the GUID. */ + uint16_t Data2; /**< [in]: Specifies the first group of 4 hexadecimal digits. */ + uint16_t Data3; /**< [in]: Specifies the second group of 4 hexadecimal digits. */ + uint8_t Data4[8]; /**< [in]: Array of 8 bytes. The first 2 bytes contain the third group of 4 hexadecimal digits. + The remaining 6 bytes contain the final 12 hexadecimal digits. */ +} GUID; +#endif // GUID + +/** + * \struct _NVENC_RECT + * Defines a Rectangle. Used in ::NV_ENC_PREPROCESS_FRAME. + */ +typedef struct _NVENC_RECT +{ + uint32_t left; /**< [in]: X coordinate of the upper left corner of rectangular area to be specified. */ + uint32_t top; /**< [in]: Y coordinate of the upper left corner of the rectangular area to be specified. */ + uint32_t right; /**< [in]: X coordinate of the bottom right corner of the rectangular area to be specified. */ + uint32_t bottom; /**< [in]: Y coordinate of the bottom right corner of the rectangular area to be specified. */ +} NVENC_RECT; + +#endif // _WIN32 + +/** @} */ /* End of GUID and NVENC_RECT structure grouping*/ + +typedef void* NV_ENC_INPUT_PTR; /**< NVENCODE API input buffer */ +typedef void* NV_ENC_OUTPUT_PTR; /**< NVENCODE API output buffer*/ +typedef void* NV_ENC_REGISTERED_PTR; /**< A Resource that has been registered with NVENCODE API*/ +typedef void* NV_ENC_CUSTREAM_PTR; /**< Pointer to CUstream*/ + +#define NVENCAPI_MAJOR_VERSION 11 +#define NVENCAPI_MINOR_VERSION 1 + +#define NVENCAPI_VERSION (NVENCAPI_MAJOR_VERSION | (NVENCAPI_MINOR_VERSION << 24)) + +/** + * Macro to generate per-structure version for use with API. + */ +#define NVENCAPI_STRUCT_VERSION(ver) ((uint32_t)NVENCAPI_VERSION | ((ver)<<16) | (0x7 << 28)) + + +#define NVENC_INFINITE_GOPLENGTH 0xffffffff + +#define NV_MAX_SEQ_HDR_LEN (512) + +#ifdef __GNUC__ +#define NV_ENC_DEPRECATED __attribute__ ((deprecated("WILL BE REMOVED IN A FUTURE VIDEO CODEC SDK VERSION"))) +#elif defined(_MSC_VER) +#define NV_ENC_DEPRECATED __declspec(deprecated("WILL BE REMOVED IN A FUTURE VIDEO CODEC SDK VERSION")) +#endif + +// ========================================================================================= +// Encode Codec GUIDS supported by the NvEncodeAPI interface. 
+// ========================================================================================= + +// {6BC82762-4E63-4ca4-AA85-1E50F321F6BF} +static const GUID NV_ENC_CODEC_H264_GUID = +{ 0x6bc82762, 0x4e63, 0x4ca4, { 0xaa, 0x85, 0x1e, 0x50, 0xf3, 0x21, 0xf6, 0xbf } }; + +// {790CDC88-4522-4d7b-9425-BDA9975F7603} +static const GUID NV_ENC_CODEC_HEVC_GUID = +{ 0x790cdc88, 0x4522, 0x4d7b, { 0x94, 0x25, 0xbd, 0xa9, 0x97, 0x5f, 0x76, 0x3 } }; + + + +// ========================================================================================= +// * Encode Profile GUIDS supported by the NvEncodeAPI interface. +// ========================================================================================= + +// {BFD6F8E7-233C-4341-8B3E-4818523803F4} +static const GUID NV_ENC_CODEC_PROFILE_AUTOSELECT_GUID = +{ 0xbfd6f8e7, 0x233c, 0x4341, { 0x8b, 0x3e, 0x48, 0x18, 0x52, 0x38, 0x3, 0xf4 } }; + +// {0727BCAA-78C4-4c83-8C2F-EF3DFF267C6A} +static const GUID NV_ENC_H264_PROFILE_BASELINE_GUID = +{ 0x727bcaa, 0x78c4, 0x4c83, { 0x8c, 0x2f, 0xef, 0x3d, 0xff, 0x26, 0x7c, 0x6a } }; + +// {60B5C1D4-67FE-4790-94D5-C4726D7B6E6D} +static const GUID NV_ENC_H264_PROFILE_MAIN_GUID = +{ 0x60b5c1d4, 0x67fe, 0x4790, { 0x94, 0xd5, 0xc4, 0x72, 0x6d, 0x7b, 0x6e, 0x6d } }; + +// {E7CBC309-4F7A-4b89-AF2A-D537C92BE310} +static const GUID NV_ENC_H264_PROFILE_HIGH_GUID = +{ 0xe7cbc309, 0x4f7a, 0x4b89, { 0xaf, 0x2a, 0xd5, 0x37, 0xc9, 0x2b, 0xe3, 0x10 } }; + +// {7AC663CB-A598-4960-B844-339B261A7D52} +static const GUID NV_ENC_H264_PROFILE_HIGH_444_GUID = +{ 0x7ac663cb, 0xa598, 0x4960, { 0xb8, 0x44, 0x33, 0x9b, 0x26, 0x1a, 0x7d, 0x52 } }; + +// {40847BF5-33F7-4601-9084-E8FE3C1DB8B7} +static const GUID NV_ENC_H264_PROFILE_STEREO_GUID = +{ 0x40847bf5, 0x33f7, 0x4601, { 0x90, 0x84, 0xe8, 0xfe, 0x3c, 0x1d, 0xb8, 0xb7 } }; + +// {B405AFAC-F32B-417B-89C4-9ABEED3E5978} +static const GUID NV_ENC_H264_PROFILE_PROGRESSIVE_HIGH_GUID = +{ 0xb405afac, 0xf32b, 0x417b, { 0x89, 0xc4, 0x9a, 0xbe, 0xed, 0x3e, 0x59, 0x78 } }; + +// {AEC1BD87-E85B-48f2-84C3-98BCA6285072} +static const GUID NV_ENC_H264_PROFILE_CONSTRAINED_HIGH_GUID = +{ 0xaec1bd87, 0xe85b, 0x48f2, { 0x84, 0xc3, 0x98, 0xbc, 0xa6, 0x28, 0x50, 0x72 } }; + +// {B514C39A-B55B-40fa-878F-F1253B4DFDEC} +static const GUID NV_ENC_HEVC_PROFILE_MAIN_GUID = +{ 0xb514c39a, 0xb55b, 0x40fa, { 0x87, 0x8f, 0xf1, 0x25, 0x3b, 0x4d, 0xfd, 0xec } }; + +// {fa4d2b6c-3a5b-411a-8018-0a3f5e3c9be5} +static const GUID NV_ENC_HEVC_PROFILE_MAIN10_GUID = +{ 0xfa4d2b6c, 0x3a5b, 0x411a, { 0x80, 0x18, 0x0a, 0x3f, 0x5e, 0x3c, 0x9b, 0xe5 } }; + +// For HEVC Main 444 8 bit and HEVC Main 444 10 bit profiles only +// {51ec32b5-1b4c-453c-9cbd-b616bd621341} +static const GUID NV_ENC_HEVC_PROFILE_FREXT_GUID = +{ 0x51ec32b5, 0x1b4c, 0x453c, { 0x9c, 0xbd, 0xb6, 0x16, 0xbd, 0x62, 0x13, 0x41 } }; + +// ========================================================================================= +// * Preset GUIDS supported by the NvEncodeAPI interface. 
+// ========================================================================================= +// {B2DFB705-4EBD-4C49-9B5F-24A777D3E587} +NV_ENC_DEPRECATED static const GUID NV_ENC_PRESET_DEFAULT_GUID = +{ 0xb2dfb705, 0x4ebd, 0x4c49, { 0x9b, 0x5f, 0x24, 0xa7, 0x77, 0xd3, 0xe5, 0x87 } }; + +// {60E4C59F-E846-4484-A56D-CD45BE9FDDF6} +NV_ENC_DEPRECATED static const GUID NV_ENC_PRESET_HP_GUID = +{ 0x60e4c59f, 0xe846, 0x4484, { 0xa5, 0x6d, 0xcd, 0x45, 0xbe, 0x9f, 0xdd, 0xf6 } }; + +// {34DBA71D-A77B-4B8F-9C3E-B6D5DA24C012} +NV_ENC_DEPRECATED static const GUID NV_ENC_PRESET_HQ_GUID = +{ 0x34dba71d, 0xa77b, 0x4b8f, { 0x9c, 0x3e, 0xb6, 0xd5, 0xda, 0x24, 0xc0, 0x12 } }; + +// {82E3E450-BDBB-4e40-989C-82A90DF9EF32} +NV_ENC_DEPRECATED static const GUID NV_ENC_PRESET_BD_GUID = +{ 0x82e3e450, 0xbdbb, 0x4e40, { 0x98, 0x9c, 0x82, 0xa9, 0xd, 0xf9, 0xef, 0x32 } }; + +// {49DF21C5-6DFA-4feb-9787-6ACC9EFFB726} +NV_ENC_DEPRECATED static const GUID NV_ENC_PRESET_LOW_LATENCY_DEFAULT_GUID = +{ 0x49df21c5, 0x6dfa, 0x4feb, { 0x97, 0x87, 0x6a, 0xcc, 0x9e, 0xff, 0xb7, 0x26 } }; + +// {C5F733B9-EA97-4cf9-BEC2-BF78A74FD105} +NV_ENC_DEPRECATED static const GUID NV_ENC_PRESET_LOW_LATENCY_HQ_GUID = +{ 0xc5f733b9, 0xea97, 0x4cf9, { 0xbe, 0xc2, 0xbf, 0x78, 0xa7, 0x4f, 0xd1, 0x5 } }; + +// {67082A44-4BAD-48FA-98EA-93056D150A58} +NV_ENC_DEPRECATED static const GUID NV_ENC_PRESET_LOW_LATENCY_HP_GUID = +{ 0x67082a44, 0x4bad, 0x48fa, { 0x98, 0xea, 0x93, 0x5, 0x6d, 0x15, 0xa, 0x58 } }; + +// {D5BFB716-C604-44e7-9BB8-DEA5510FC3AC} +NV_ENC_DEPRECATED static const GUID NV_ENC_PRESET_LOSSLESS_DEFAULT_GUID = +{ 0xd5bfb716, 0xc604, 0x44e7, { 0x9b, 0xb8, 0xde, 0xa5, 0x51, 0xf, 0xc3, 0xac } }; + +// {149998E7-2364-411d-82EF-179888093409} +NV_ENC_DEPRECATED static const GUID NV_ENC_PRESET_LOSSLESS_HP_GUID = +{ 0x149998e7, 0x2364, 0x411d, { 0x82, 0xef, 0x17, 0x98, 0x88, 0x9, 0x34, 0x9 } }; + +// Performance degrades and quality improves as we move from P1 to P7. Presets P3 to P7 for H264 and Presets P2 to P7 for HEVC have B frames enabled by default +// for HIGH_QUALITY and LOSSLESS tuning info, and will not work with Weighted Prediction enabled. 
In case Weighted Prediction is required, disable B frames by +// setting frameIntervalP = 1 +// {FC0A8D3E-45F8-4CF8-80C7-298871590EBF} +static const GUID NV_ENC_PRESET_P1_GUID = +{ 0xfc0a8d3e, 0x45f8, 0x4cf8, { 0x80, 0xc7, 0x29, 0x88, 0x71, 0x59, 0xe, 0xbf } }; + +// {F581CFB8-88D6-4381-93F0-DF13F9C27DAB} +static const GUID NV_ENC_PRESET_P2_GUID = +{ 0xf581cfb8, 0x88d6, 0x4381, { 0x93, 0xf0, 0xdf, 0x13, 0xf9, 0xc2, 0x7d, 0xab } }; + +// {36850110-3A07-441F-94D5-3670631F91F6} +static const GUID NV_ENC_PRESET_P3_GUID = +{ 0x36850110, 0x3a07, 0x441f, { 0x94, 0xd5, 0x36, 0x70, 0x63, 0x1f, 0x91, 0xf6 } }; + +// {90A7B826-DF06-4862-B9D2-CD6D73A08681} +static const GUID NV_ENC_PRESET_P4_GUID = +{ 0x90a7b826, 0xdf06, 0x4862, { 0xb9, 0xd2, 0xcd, 0x6d, 0x73, 0xa0, 0x86, 0x81 } }; + +// {21C6E6B4-297A-4CBA-998F-B6CBDE72ADE3} +static const GUID NV_ENC_PRESET_P5_GUID = +{ 0x21c6e6b4, 0x297a, 0x4cba, { 0x99, 0x8f, 0xb6, 0xcb, 0xde, 0x72, 0xad, 0xe3 } }; + +// {8E75C279-6299-4AB6-8302-0B215A335CF5} +static const GUID NV_ENC_PRESET_P6_GUID = +{ 0x8e75c279, 0x6299, 0x4ab6, { 0x83, 0x2, 0xb, 0x21, 0x5a, 0x33, 0x5c, 0xf5 } }; + +// {84848C12-6F71-4C13-931B-53E283F57974} +static const GUID NV_ENC_PRESET_P7_GUID = +{ 0x84848c12, 0x6f71, 0x4c13, { 0x93, 0x1b, 0x53, 0xe2, 0x83, 0xf5, 0x79, 0x74 } }; + +/** + * \addtogroup ENCODER_STRUCTURE NvEncodeAPI Data structures + * @{ + */ + +/** + * Input frame encode modes + */ +typedef enum _NV_ENC_PARAMS_FRAME_FIELD_MODE +{ + NV_ENC_PARAMS_FRAME_FIELD_MODE_FRAME = 0x01, /**< Frame mode */ + NV_ENC_PARAMS_FRAME_FIELD_MODE_FIELD = 0x02, /**< Field mode */ + NV_ENC_PARAMS_FRAME_FIELD_MODE_MBAFF = 0x03 /**< MB adaptive frame/field */ +} NV_ENC_PARAMS_FRAME_FIELD_MODE; + +/** + * Rate Control Modes + */ +typedef enum _NV_ENC_PARAMS_RC_MODE +{ + NV_ENC_PARAMS_RC_CONSTQP = 0x0, /**< Constant QP mode */ + NV_ENC_PARAMS_RC_VBR = 0x1, /**< Variable bitrate mode */ + NV_ENC_PARAMS_RC_CBR = 0x2, /**< Constant bitrate mode */ + NV_ENC_PARAMS_RC_CBR_LOWDELAY_HQ = 0x8, /**< Deprecated, use NV_ENC_PARAMS_RC_CBR + NV_ENC_TWO_PASS_QUARTER_RESOLUTION / NV_ENC_TWO_PASS_FULL_RESOLUTION + + lowDelayKeyFrameScale=1 */ + NV_ENC_PARAMS_RC_CBR_HQ = 0x10, /**< Deprecated, use NV_ENC_PARAMS_RC_CBR + NV_ENC_TWO_PASS_QUARTER_RESOLUTION / NV_ENC_TWO_PASS_FULL_RESOLUTION */ + NV_ENC_PARAMS_RC_VBR_HQ = 0x20 /**< Deprecated, use NV_ENC_PARAMS_RC_VBR + NV_ENC_TWO_PASS_QUARTER_RESOLUTION / NV_ENC_TWO_PASS_FULL_RESOLUTION */ +} NV_ENC_PARAMS_RC_MODE; + +/** + * Multi Pass encoding + */ +typedef enum _NV_ENC_MULTI_PASS +{ + NV_ENC_MULTI_PASS_DISABLED = 0x0, /**< Single Pass */ + NV_ENC_TWO_PASS_QUARTER_RESOLUTION = 0x1, /**< Two Pass encoding is enabled where first Pass is quarter resolution */ + NV_ENC_TWO_PASS_FULL_RESOLUTION = 0x2, /**< Two Pass encoding is enabled where first Pass is full resolution */ +} NV_ENC_MULTI_PASS; + +/** + * Emphasis Levels + */ +typedef enum _NV_ENC_EMPHASIS_MAP_LEVEL +{ + NV_ENC_EMPHASIS_MAP_LEVEL_0 = 0x0, /**< Emphasis Map Level 0, for zero Delta QP value */ + NV_ENC_EMPHASIS_MAP_LEVEL_1 = 0x1, /**< Emphasis Map Level 1, for very low Delta QP value */ + NV_ENC_EMPHASIS_MAP_LEVEL_2 = 0x2, /**< Emphasis Map Level 2, for low Delta QP value */ + NV_ENC_EMPHASIS_MAP_LEVEL_3 = 0x3, /**< Emphasis Map Level 3, for medium Delta QP value */ + NV_ENC_EMPHASIS_MAP_LEVEL_4 = 0x4, /**< Emphasis Map Level 4, for high Delta QP value */ + NV_ENC_EMPHASIS_MAP_LEVEL_5 = 0x5 /**< Emphasis Map Level 5, for very high Delta QP value */ +} NV_ENC_EMPHASIS_MAP_LEVEL; + +/** + * QP MAP MODE + */ 
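+// Illustrative use of the QP map modes defined below (a sketch, not SDK
+// documentation): the client picks the interpretation once via
+// NV_ENC_RC_PARAMS::qpMapMode and then supplies a per-macroblock array each
+// frame through NV_ENC_PIC_PARAMS::qpDeltaMap / qpDeltaMapSize, e.g.
+//     rc.qpMapMode       = NV_ENC_QP_MAP_DELTA;
+//     pic.qpDeltaMap     = map;  // int8_t values, one per macroblock
+//     pic.qpDeltaMapSize = n;    // size of the array in bytes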
+typedef enum _NV_ENC_QP_MAP_MODE
+{
+    NV_ENC_QP_MAP_DISABLED             = 0x0,  /**< Value in NV_ENC_PIC_PARAMS::qpDeltaMap has no effect. */
+    NV_ENC_QP_MAP_EMPHASIS             = 0x1,  /**< Value in NV_ENC_PIC_PARAMS::qpDeltaMap will be treated as Emphasis level. Currently this is only supported for H264 */
+    NV_ENC_QP_MAP_DELTA                = 0x2,  /**< Value in NV_ENC_PIC_PARAMS::qpDeltaMap will be treated as QP delta map. */
+    NV_ENC_QP_MAP                      = 0x3,  /**< Currently this is not supported. Value in NV_ENC_PIC_PARAMS::qpDeltaMap will be treated as QP value. */
+} NV_ENC_QP_MAP_MODE;
+
+#define NV_ENC_PARAMS_RC_VBR_MINQP              (NV_ENC_PARAMS_RC_MODE)0x4          /**< Deprecated */
+#define NV_ENC_PARAMS_RC_2_PASS_QUALITY         NV_ENC_PARAMS_RC_CBR_LOWDELAY_HQ    /**< Deprecated */
+#define NV_ENC_PARAMS_RC_2_PASS_FRAMESIZE_CAP   NV_ENC_PARAMS_RC_CBR_HQ             /**< Deprecated */
+#define NV_ENC_PARAMS_RC_2_PASS_VBR             NV_ENC_PARAMS_RC_VBR_HQ             /**< Deprecated */
+#define NV_ENC_PARAMS_RC_CBR2                   NV_ENC_PARAMS_RC_CBR                /**< Deprecated */
+
+/**
+ * Input picture structure
+ */
+typedef enum _NV_ENC_PIC_STRUCT
+{
+    NV_ENC_PIC_STRUCT_FRAME            = 0x01,  /**< Progressive frame */
+    NV_ENC_PIC_STRUCT_FIELD_TOP_BOTTOM = 0x02,  /**< Field encoding top field first */
+    NV_ENC_PIC_STRUCT_FIELD_BOTTOM_TOP = 0x03   /**< Field encoding bottom field first */
+} NV_ENC_PIC_STRUCT;
+
+/**
+ * Input picture type
+ */
+typedef enum _NV_ENC_PIC_TYPE
+{
+    NV_ENC_PIC_TYPE_P             = 0x0,   /**< Forward predicted */
+    NV_ENC_PIC_TYPE_B             = 0x01,  /**< Bi-directionally predicted picture */
+    NV_ENC_PIC_TYPE_I             = 0x02,  /**< Intra predicted picture */
+    NV_ENC_PIC_TYPE_IDR           = 0x03,  /**< IDR picture */
+    NV_ENC_PIC_TYPE_BI            = 0x04,  /**< Bi-directionally predicted with only Intra MBs */
+    NV_ENC_PIC_TYPE_SKIPPED       = 0x05,  /**< Picture is skipped */
+    NV_ENC_PIC_TYPE_INTRA_REFRESH = 0x06,  /**< First picture in intra refresh cycle */
+    NV_ENC_PIC_TYPE_NONREF_P      = 0x07,  /**< Non-reference P picture */
+    NV_ENC_PIC_TYPE_UNKNOWN       = 0xFF   /**< Picture type unknown */
+} NV_ENC_PIC_TYPE;
+
+/**
+ * Motion vector precisions
+ */
+typedef enum _NV_ENC_MV_PRECISION
+{
+    NV_ENC_MV_PRECISION_DEFAULT     = 0x0,   /**< Driver selects Quarter-Pel motion vector precision by default */
+    NV_ENC_MV_PRECISION_FULL_PEL    = 0x01,  /**< Full-Pel motion vector precision */
+    NV_ENC_MV_PRECISION_HALF_PEL    = 0x02,  /**< Half-Pel motion vector precision */
+    NV_ENC_MV_PRECISION_QUARTER_PEL = 0x03   /**< Quarter-Pel motion vector precision */
+} NV_ENC_MV_PRECISION;
+
+
+/**
+ * Input buffer formats
+ */
+typedef enum _NV_ENC_BUFFER_FORMAT
+{
+    NV_ENC_BUFFER_FORMAT_UNDEFINED     = 0x00000000,  /**< Undefined buffer format */
+
+    NV_ENC_BUFFER_FORMAT_NV12          = 0x00000001,  /**< Semi-Planar YUV [Y plane followed by interleaved UV plane] */
+    NV_ENC_BUFFER_FORMAT_YV12          = 0x00000010,  /**< Planar YUV [Y plane followed by V and U planes] */
+    NV_ENC_BUFFER_FORMAT_IYUV          = 0x00000100,  /**< Planar YUV [Y plane followed by U and V planes] */
+    NV_ENC_BUFFER_FORMAT_YUV444        = 0x00001000,  /**< Planar YUV [Y plane followed by U and V planes] */
+    NV_ENC_BUFFER_FORMAT_YUV420_10BIT  = 0x00010000,  /**< 10 bit Semi-Planar YUV [Y plane followed by interleaved UV plane]. Each pixel of size 2 bytes. Most Significant 10 bits contain pixel data. */
+    NV_ENC_BUFFER_FORMAT_YUV444_10BIT  = 0x00100000,  /**< 10 bit Planar YUV444 [Y plane followed by U and V planes]. Each pixel of size 2 bytes. Most Significant 10 bits contain pixel data. */
+    NV_ENC_BUFFER_FORMAT_ARGB          = 0x01000000,  /**< 8 bit Packed A8R8G8B8.
This is a word-ordered format + where a pixel is represented by a 32-bit word with B + in the lowest 8 bits, G in the next 8 bits, R in the + 8 bits after that and A in the highest 8 bits. */ + NV_ENC_BUFFER_FORMAT_ARGB10 = 0x02000000, /**< 10 bit Packed A2R10G10B10. This is a word-ordered format + where a pixel is represented by a 32-bit word with B + in the lowest 10 bits, G in the next 10 bits, R in the + 10 bits after that and A in the highest 2 bits. */ + NV_ENC_BUFFER_FORMAT_AYUV = 0x04000000, /**< 8 bit Packed A8Y8U8V8. This is a word-ordered format + where a pixel is represented by a 32-bit word with V + in the lowest 8 bits, U in the next 8 bits, Y in the + 8 bits after that and A in the highest 8 bits. */ + NV_ENC_BUFFER_FORMAT_ABGR = 0x10000000, /**< 8 bit Packed A8B8G8R8. This is a word-ordered format + where a pixel is represented by a 32-bit word with R + in the lowest 8 bits, G in the next 8 bits, B in the + 8 bits after that and A in the highest 8 bits. */ + NV_ENC_BUFFER_FORMAT_ABGR10 = 0x20000000, /**< 10 bit Packed A2B10G10R10. This is a word-ordered format + where a pixel is represented by a 32-bit word with R + in the lowest 10 bits, G in the next 10 bits, B in the + 10 bits after that and A in the highest 2 bits. */ + NV_ENC_BUFFER_FORMAT_U8 = 0x40000000, /**< Buffer format representing one-dimensional buffer. + This format should be used only when registering the + resource as output buffer, which will be used to write + the encoded bit stream or H.264 ME only mode output. */ +} NV_ENC_BUFFER_FORMAT; + +#define NV_ENC_BUFFER_FORMAT_NV12_PL NV_ENC_BUFFER_FORMAT_NV12 +#define NV_ENC_BUFFER_FORMAT_YV12_PL NV_ENC_BUFFER_FORMAT_YV12 +#define NV_ENC_BUFFER_FORMAT_IYUV_PL NV_ENC_BUFFER_FORMAT_IYUV +#define NV_ENC_BUFFER_FORMAT_YUV444_PL NV_ENC_BUFFER_FORMAT_YUV444 + +/** + * Encoding levels + */ +typedef enum _NV_ENC_LEVEL +{ + NV_ENC_LEVEL_AUTOSELECT = 0, + + NV_ENC_LEVEL_H264_1 = 10, + NV_ENC_LEVEL_H264_1b = 9, + NV_ENC_LEVEL_H264_11 = 11, + NV_ENC_LEVEL_H264_12 = 12, + NV_ENC_LEVEL_H264_13 = 13, + NV_ENC_LEVEL_H264_2 = 20, + NV_ENC_LEVEL_H264_21 = 21, + NV_ENC_LEVEL_H264_22 = 22, + NV_ENC_LEVEL_H264_3 = 30, + NV_ENC_LEVEL_H264_31 = 31, + NV_ENC_LEVEL_H264_32 = 32, + NV_ENC_LEVEL_H264_4 = 40, + NV_ENC_LEVEL_H264_41 = 41, + NV_ENC_LEVEL_H264_42 = 42, + NV_ENC_LEVEL_H264_5 = 50, + NV_ENC_LEVEL_H264_51 = 51, + NV_ENC_LEVEL_H264_52 = 52, + NV_ENC_LEVEL_H264_60 = 60, + NV_ENC_LEVEL_H264_61 = 61, + NV_ENC_LEVEL_H264_62 = 62, + + NV_ENC_LEVEL_HEVC_1 = 30, + NV_ENC_LEVEL_HEVC_2 = 60, + NV_ENC_LEVEL_HEVC_21 = 63, + NV_ENC_LEVEL_HEVC_3 = 90, + NV_ENC_LEVEL_HEVC_31 = 93, + NV_ENC_LEVEL_HEVC_4 = 120, + NV_ENC_LEVEL_HEVC_41 = 123, + NV_ENC_LEVEL_HEVC_5 = 150, + NV_ENC_LEVEL_HEVC_51 = 153, + NV_ENC_LEVEL_HEVC_52 = 156, + NV_ENC_LEVEL_HEVC_6 = 180, + NV_ENC_LEVEL_HEVC_61 = 183, + NV_ENC_LEVEL_HEVC_62 = 186, + + NV_ENC_TIER_HEVC_MAIN = 0, + NV_ENC_TIER_HEVC_HIGH = 1 +} NV_ENC_LEVEL; + +/** + * Error Codes + */ +typedef enum _NVENCSTATUS +{ + /** + * This indicates that API call returned with no errors. + */ + NV_ENC_SUCCESS, + + /** + * This indicates that no encode capable devices were detected. + */ + NV_ENC_ERR_NO_ENCODE_DEVICE, + + /** + * This indicates that devices pass by the client is not supported. + */ + NV_ENC_ERR_UNSUPPORTED_DEVICE, + + /** + * This indicates that the encoder device supplied by the client is not + * valid. + */ + NV_ENC_ERR_INVALID_ENCODERDEVICE, + + /** + * This indicates that device passed to the API call is invalid. 
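+     * A typical (illustrative) status check; 'fn', 'hEncoder' and 'initParams'
+     * are placeholders for the client's function table, session and parameters:
+     * \code
+     * NVENCSTATUS s = fn.nvEncInitializeEncoder(hEncoder, &initParams);
+     * if (s != NV_ENC_SUCCESS)
+     *     return s; // propagate the failure
+     * \endcode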
+     */
+    NV_ENC_ERR_INVALID_DEVICE,
+
+    /**
+     * This indicates that the device passed to the API call is no longer available and
+     * needs to be reinitialized. The client needs to destroy the current encoder
+     * session by freeing the allocated input and output buffers and destroying the device,
+     * and then create a new encoding session.
+     */
+    NV_ENC_ERR_DEVICE_NOT_EXIST,
+
+    /**
+     * This indicates that one or more of the pointers passed to the API call
+     * are invalid.
+     */
+    NV_ENC_ERR_INVALID_PTR,
+
+    /**
+     * This indicates that the completion event passed in the ::NvEncEncodePicture() call
+     * is invalid.
+     */
+    NV_ENC_ERR_INVALID_EVENT,
+
+    /**
+     * This indicates that one or more of the parameters passed to the API call
+     * are invalid.
+     */
+    NV_ENC_ERR_INVALID_PARAM,
+
+    /**
+     * This indicates that an API call was made in the wrong sequence/order.
+     */
+    NV_ENC_ERR_INVALID_CALL,
+
+    /**
+     * This indicates that the API call failed because it was unable to allocate
+     * enough memory to perform the requested operation.
+     */
+    NV_ENC_ERR_OUT_OF_MEMORY,
+
+    /**
+     * This indicates that the encoder has not been initialized with
+     * ::NvEncInitializeEncoder() or that initialization has failed.
+     * The client cannot allocate input or output buffers or do any encoding
+     * related operation before successfully initializing the encoder.
+     */
+    NV_ENC_ERR_ENCODER_NOT_INITIALIZED,
+
+    /**
+     * This indicates that an unsupported parameter was passed by the client.
+     */
+    NV_ENC_ERR_UNSUPPORTED_PARAM,
+
+    /**
+     * This indicates that ::NvEncLockBitstream() failed to lock the output
+     * buffer. This happens when the client makes a non-blocking lock call to
+     * access the output bitstream by passing the NV_ENC_LOCK_BITSTREAM::doNotWait flag.
+     * This is not a fatal error and the client should retry the same operation after
+     * a few milliseconds.
+     */
+    NV_ENC_ERR_LOCK_BUSY,
+
+    /**
+     * This indicates that the size of the user buffer passed by the client is
+     * insufficient for the requested operation.
+     */
+    NV_ENC_ERR_NOT_ENOUGH_BUFFER,
+
+    /**
+     * This indicates that an invalid struct version was used by the client.
+     */
+    NV_ENC_ERR_INVALID_VERSION,
+
+    /**
+     * This indicates that the ::NvEncMapInputResource() API failed to map the client
+     * provided input resource.
+     */
+    NV_ENC_ERR_MAP_FAILED,
+
+    /**
+     * This indicates that the encoder driver requires more input buffers to produce an output
+     * bitstream. If this error is returned from the ::NvEncEncodePicture() API, this
+     * is not a fatal error. If the client is encoding with B frames,
+     * the ::NvEncEncodePicture() API might be buffering the input frame for re-ordering.
+     *
+     * A client operating in synchronous mode cannot call ::NvEncLockBitstream()
+     * API on the output bitstream buffer if ::NvEncEncodePicture() returned the
+     * ::NV_ENC_ERR_NEED_MORE_INPUT error code.
+     * The client must continue providing input frames until the encoder driver returns
+     * ::NV_ENC_SUCCESS. After receiving ::NV_ENC_SUCCESS status the client can call
+     * ::NvEncLockBitstream() API on the output buffers in the same order in which
+     * it has called ::NvEncEncodePicture().
+     */
+    NV_ENC_ERR_NEED_MORE_INPUT,
+
+    /**
+     * This indicates that the HW encoder is busy encoding and is unable to encode
+     * the input. The client should call ::NvEncEncodePicture() again after a few
+     * milliseconds.
+     */
+    NV_ENC_ERR_ENCODER_BUSY,
+
+    /**
+     * This indicates that the completion event passed in ::NvEncEncodePicture()
+     * API has not been registered with the encoder driver using ::NvEncRegisterAsyncEvent().
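+     * An illustrative registration sketch ('fn', 'hEncoder' and 'hEvent' are
+     * placeholders; hEvent is an event handle created by the client):
+     * \code
+     * NV_ENC_EVENT_PARAMS ev = { NV_ENC_EVENT_PARAMS_VER };
+     * ev.completionEvent = hEvent;
+     * fn.nvEncRegisterAsyncEvent(hEncoder, &ev);
+     * \endcode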
+     */
+    NV_ENC_ERR_EVENT_NOT_REGISTERD,
+
+    /**
+     * This indicates that an unknown internal error has occurred.
+     */
+    NV_ENC_ERR_GENERIC,
+
+    /**
+     * This indicates that the client is attempting to use a feature
+     * that is not available for the license type for the current system.
+     */
+    NV_ENC_ERR_INCOMPATIBLE_CLIENT_KEY,
+
+    /**
+     * This indicates that the client is attempting to use a feature
+     * that is not implemented for the current version.
+     */
+    NV_ENC_ERR_UNIMPLEMENTED,
+
+    /**
+     * This indicates that the ::NvEncRegisterResource API failed to register the resource.
+     */
+    NV_ENC_ERR_RESOURCE_REGISTER_FAILED,
+
+    /**
+     * This indicates that the client is attempting to unregister a resource
+     * that has not been successfully registered.
+     */
+    NV_ENC_ERR_RESOURCE_NOT_REGISTERED,
+
+    /**
+     * This indicates that the client is attempting to unmap a resource
+     * that has not been successfully mapped.
+     */
+    NV_ENC_ERR_RESOURCE_NOT_MAPPED,
+
+} NVENCSTATUS;
+
+/**
+ * Encode picture flags.
+ */
+typedef enum _NV_ENC_PIC_FLAGS
+{
+    NV_ENC_PIC_FLAG_FORCEINTRA    = 0x1,  /**< Encode the current picture as an Intra picture */
+    NV_ENC_PIC_FLAG_FORCEIDR      = 0x2,  /**< Encode the current picture as an IDR picture.
+                                               This flag is only valid when the picture type decision is taken by the encoder
+                                               [_NV_ENC_INITIALIZE_PARAMS::enablePTD == 1]. */
+    NV_ENC_PIC_FLAG_OUTPUT_SPSPPS = 0x4,  /**< Write the sequence and picture header in the encoded bitstream of the current picture */
+    NV_ENC_PIC_FLAG_EOS           = 0x8,  /**< Indicates end of the input stream */
+} NV_ENC_PIC_FLAGS;
+
+/**
+ * Memory heap to allocate input and output buffers.
+ */
+typedef enum _NV_ENC_MEMORY_HEAP
+{
+    NV_ENC_MEMORY_HEAP_AUTOSELECT      = 0,  /**< Memory heap to be decided by the encoder driver based on the usage */
+    NV_ENC_MEMORY_HEAP_VID             = 1,  /**< Memory heap is in local video memory */
+    NV_ENC_MEMORY_HEAP_SYSMEM_CACHED   = 2,  /**< Memory heap is in cached system memory */
+    NV_ENC_MEMORY_HEAP_SYSMEM_UNCACHED = 3   /**< Memory heap is in uncached system memory */
+} NV_ENC_MEMORY_HEAP;
+
+/**
+ * B-frame used as reference modes
+ */
+typedef enum _NV_ENC_BFRAME_REF_MODE
+{
+    NV_ENC_BFRAME_REF_MODE_DISABLED = 0x0,  /**< B frame is not used for reference */
+    NV_ENC_BFRAME_REF_MODE_EACH     = 0x1,  /**< Each B-frame will be used for reference. Currently not supported for H.264 */
+    NV_ENC_BFRAME_REF_MODE_MIDDLE   = 0x2,  /**< Only the (number of B-frames)/2-th B-frame will be used for reference */
+} NV_ENC_BFRAME_REF_MODE;
+
+/**
+ * H.264 entropy coding modes.
+ */ +typedef enum _NV_ENC_H264_ENTROPY_CODING_MODE +{ + NV_ENC_H264_ENTROPY_CODING_MODE_AUTOSELECT = 0x0, /**< Entropy coding mode is auto selected by the encoder driver */ + NV_ENC_H264_ENTROPY_CODING_MODE_CABAC = 0x1, /**< Entropy coding mode is CABAC */ + NV_ENC_H264_ENTROPY_CODING_MODE_CAVLC = 0x2 /**< Entropy coding mode is CAVLC */ +} NV_ENC_H264_ENTROPY_CODING_MODE; + +/** + * H.264 specific BDirect modes + */ +typedef enum _NV_ENC_H264_BDIRECT_MODE +{ + NV_ENC_H264_BDIRECT_MODE_AUTOSELECT = 0x0, /**< BDirect mode is auto selected by the encoder driver */ + NV_ENC_H264_BDIRECT_MODE_DISABLE = 0x1, /**< Disable BDirect mode */ + NV_ENC_H264_BDIRECT_MODE_TEMPORAL = 0x2, /**< Temporal BDirect mode */ + NV_ENC_H264_BDIRECT_MODE_SPATIAL = 0x3 /**< Spatial BDirect mode */ +} NV_ENC_H264_BDIRECT_MODE; + +/** + * H.264 specific FMO usage + */ +typedef enum _NV_ENC_H264_FMO_MODE +{ + NV_ENC_H264_FMO_AUTOSELECT = 0x0, /**< FMO usage is auto selected by the encoder driver */ + NV_ENC_H264_FMO_ENABLE = 0x1, /**< Enable FMO */ + NV_ENC_H264_FMO_DISABLE = 0x2, /**< Disable FMO */ +} NV_ENC_H264_FMO_MODE; + +/** + * H.264 specific Adaptive Transform modes + */ +typedef enum _NV_ENC_H264_ADAPTIVE_TRANSFORM_MODE +{ + NV_ENC_H264_ADAPTIVE_TRANSFORM_AUTOSELECT = 0x0, /**< Adaptive Transform 8x8 mode is auto selected by the encoder driver*/ + NV_ENC_H264_ADAPTIVE_TRANSFORM_DISABLE = 0x1, /**< Adaptive Transform 8x8 mode disabled */ + NV_ENC_H264_ADAPTIVE_TRANSFORM_ENABLE = 0x2, /**< Adaptive Transform 8x8 mode should be used */ +} NV_ENC_H264_ADAPTIVE_TRANSFORM_MODE; + +/** + * Stereo frame packing modes. + */ +typedef enum _NV_ENC_STEREO_PACKING_MODE +{ + NV_ENC_STEREO_PACKING_MODE_NONE = 0x0, /**< No Stereo packing required */ + NV_ENC_STEREO_PACKING_MODE_CHECKERBOARD = 0x1, /**< Checkerboard mode for packing stereo frames */ + NV_ENC_STEREO_PACKING_MODE_COLINTERLEAVE = 0x2, /**< Column Interleave mode for packing stereo frames */ + NV_ENC_STEREO_PACKING_MODE_ROWINTERLEAVE = 0x3, /**< Row Interleave mode for packing stereo frames */ + NV_ENC_STEREO_PACKING_MODE_SIDEBYSIDE = 0x4, /**< Side-by-side mode for packing stereo frames */ + NV_ENC_STEREO_PACKING_MODE_TOPBOTTOM = 0x5, /**< Top-Bottom mode for packing stereo frames */ + NV_ENC_STEREO_PACKING_MODE_FRAMESEQ = 0x6 /**< Frame Sequential mode for packing stereo frames */ +} NV_ENC_STEREO_PACKING_MODE; + +/** + * Input Resource type + */ +typedef enum _NV_ENC_INPUT_RESOURCE_TYPE +{ + NV_ENC_INPUT_RESOURCE_TYPE_DIRECTX = 0x0, /**< input resource type is a directx9 surface*/ + NV_ENC_INPUT_RESOURCE_TYPE_CUDADEVICEPTR = 0x1, /**< input resource type is a cuda device pointer surface*/ + NV_ENC_INPUT_RESOURCE_TYPE_CUDAARRAY = 0x2, /**< input resource type is a cuda array surface. + This array must be a 2D array and the CUDA_ARRAY3D_SURFACE_LDST + flag must have been specified when creating it. */ + NV_ENC_INPUT_RESOURCE_TYPE_OPENGL_TEX = 0x3 /**< input resource type is an OpenGL texture */ +} NV_ENC_INPUT_RESOURCE_TYPE; + +/** + * Buffer usage + */ +typedef enum _NV_ENC_BUFFER_USAGE +{ + NV_ENC_INPUT_IMAGE = 0x0, /**< Registered surface will be used for input image */ + NV_ENC_OUTPUT_MOTION_VECTOR = 0x1, /**< Registered surface will be used for output of H.264 ME only mode. + This buffer usage type is not supported for HEVC ME only mode. 
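+                                            An illustrative registration sketch ('fn' and 'hEncoder' are placeholders;
+                                            the resource type, dimensions and handle fields must also be filled in):
+                                              NV_ENC_REGISTER_RESOURCE reg = { NV_ENC_REGISTER_RESOURCE_VER };
+                                              reg.bufferUsage = NV_ENC_OUTPUT_MOTION_VECTOR;
+                                              fn.nvEncRegisterResource(hEncoder, &reg);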
*/ + NV_ENC_OUTPUT_BITSTREAM = 0x2 /**< Registered surface will be used for output bitstream in encoding */ +} NV_ENC_BUFFER_USAGE; + +/** + * Encoder Device type + */ +typedef enum _NV_ENC_DEVICE_TYPE +{ + NV_ENC_DEVICE_TYPE_DIRECTX = 0x0, /**< encode device type is a directx9 device */ + NV_ENC_DEVICE_TYPE_CUDA = 0x1, /**< encode device type is a cuda device */ + NV_ENC_DEVICE_TYPE_OPENGL = 0x2 /**< encode device type is an OpenGL device. + Use of this device type is supported only on Linux */ +} NV_ENC_DEVICE_TYPE; + +/** + * Number of reference frames + */ +typedef enum _NV_ENC_NUM_REF_FRAMES +{ + NV_ENC_NUM_REF_FRAMES_AUTOSELECT = 0x0, /**< Number of reference frames is auto selected by the encoder driver */ + NV_ENC_NUM_REF_FRAMES_1 = 0x1, /**< Number of reference frames equal to 1 */ + NV_ENC_NUM_REF_FRAMES_2 = 0x2, /**< Number of reference frames equal to 2 */ + NV_ENC_NUM_REF_FRAMES_3 = 0x3, /**< Number of reference frames equal to 3 */ + NV_ENC_NUM_REF_FRAMES_4 = 0x4, /**< Number of reference frames equal to 4 */ + NV_ENC_NUM_REF_FRAMES_5 = 0x5, /**< Number of reference frames equal to 5 */ + NV_ENC_NUM_REF_FRAMES_6 = 0x6, /**< Number of reference frames equal to 6 */ + NV_ENC_NUM_REF_FRAMES_7 = 0x7 /**< Number of reference frames equal to 7 */ +} NV_ENC_NUM_REF_FRAMES; + +/** + * Encoder capabilities enumeration. + */ +typedef enum _NV_ENC_CAPS +{ + /** + * Maximum number of B-Frames supported. + */ + NV_ENC_CAPS_NUM_MAX_BFRAMES, + + /** + * Rate control modes supported. + * \n The API return value is a bitmask of the values in NV_ENC_PARAMS_RC_MODE. + */ + NV_ENC_CAPS_SUPPORTED_RATECONTROL_MODES, + + /** + * Indicates HW support for field mode encoding. + * \n 0 : Interlaced mode encoding is not supported. + * \n 1 : Interlaced field mode encoding is supported. + * \n 2 : Interlaced frame encoding and field mode encoding are both supported. + */ + NV_ENC_CAPS_SUPPORT_FIELD_ENCODING, + + /** + * Indicates HW support for monochrome mode encoding. + * \n 0 : Monochrome mode not supported. + * \n 1 : Monochrome mode supported. + */ + NV_ENC_CAPS_SUPPORT_MONOCHROME, + + /** + * Indicates HW support for FMO. + * \n 0 : FMO not supported. + * \n 1 : FMO supported. + */ + NV_ENC_CAPS_SUPPORT_FMO, + + /** + * Indicates HW capability for Quarter pel motion estimation. + * \n 0 : Quarter-Pel Motion Estimation not supported. + * \n 1 : Quarter-Pel Motion Estimation supported. + */ + NV_ENC_CAPS_SUPPORT_QPELMV, + + /** + * H.264 specific. Indicates HW support for BDirect modes. + * \n 0 : BDirect mode encoding not supported. + * \n 1 : BDirect mode encoding supported. + */ + NV_ENC_CAPS_SUPPORT_BDIRECT_MODE, + + /** + * H264 specific. Indicates HW support for CABAC entropy coding mode. + * \n 0 : CABAC entropy coding not supported. + * \n 1 : CABAC entropy coding supported. + */ + NV_ENC_CAPS_SUPPORT_CABAC, + + /** + * Indicates HW support for Adaptive Transform. + * \n 0 : Adaptive Transform not supported. + * \n 1 : Adaptive Transform supported. + */ + NV_ENC_CAPS_SUPPORT_ADAPTIVE_TRANSFORM, + + /** + * Indicates HW support for Multi View Coding. + * \n 0 : Multi View Coding not supported. + * \n 1 : Multi View Coding supported. + */ + NV_ENC_CAPS_SUPPORT_STEREO_MVC, + + /** + * Indicates HW support for encoding Temporal layers. + * \n 0 : Encoding Temporal layers not supported. + * \n 1 : Encoding Temporal layers supported. + */ + NV_ENC_CAPS_NUM_MAX_TEMPORAL_LAYERS, + + /** + * Indicates HW support for Hierarchical P frames. + * \n 0 : Hierarchical P frames not supported. 
+ * \n 1 : Hierarchical P frames supported. + */ + NV_ENC_CAPS_SUPPORT_HIERARCHICAL_PFRAMES, + + /** + * Indicates HW support for Hierarchical B frames. + * \n 0 : Hierarchical B frames not supported. + * \n 1 : Hierarchical B frames supported. + */ + NV_ENC_CAPS_SUPPORT_HIERARCHICAL_BFRAMES, + + /** + * Maximum Encoding level supported (See ::NV_ENC_LEVEL for details). + */ + NV_ENC_CAPS_LEVEL_MAX, + + /** + * Minimum Encoding level supported (See ::NV_ENC_LEVEL for details). + */ + NV_ENC_CAPS_LEVEL_MIN, + + /** + * Indicates HW support for separate colour plane encoding. + * \n 0 : Separate colour plane encoding not supported. + * \n 1 : Separate colour plane encoding supported. + */ + NV_ENC_CAPS_SEPARATE_COLOUR_PLANE, + + /** + * Maximum output width supported. + */ + NV_ENC_CAPS_WIDTH_MAX, + + /** + * Maximum output height supported. + */ + NV_ENC_CAPS_HEIGHT_MAX, + + /** + * Indicates Temporal Scalability Support. + * \n 0 : Temporal SVC encoding not supported. + * \n 1 : Temporal SVC encoding supported. + */ + NV_ENC_CAPS_SUPPORT_TEMPORAL_SVC, + + /** + * Indicates Dynamic Encode Resolution Change Support. + * Support added from NvEncodeAPI version 2.0. + * \n 0 : Dynamic Encode Resolution Change not supported. + * \n 1 : Dynamic Encode Resolution Change supported. + */ + NV_ENC_CAPS_SUPPORT_DYN_RES_CHANGE, + + /** + * Indicates Dynamic Encode Bitrate Change Support. + * Support added from NvEncodeAPI version 2.0. + * \n 0 : Dynamic Encode bitrate change not supported. + * \n 1 : Dynamic Encode bitrate change supported. + */ + NV_ENC_CAPS_SUPPORT_DYN_BITRATE_CHANGE, + + /** + * Indicates Forcing Constant QP On The Fly Support. + * Support added from NvEncodeAPI version 2.0. + * \n 0 : Forcing constant QP on the fly not supported. + * \n 1 : Forcing constant QP on the fly supported. + */ + NV_ENC_CAPS_SUPPORT_DYN_FORCE_CONSTQP, + + /** + * Indicates Dynamic rate control mode Change Support. + * \n 0 : Dynamic rate control mode change not supported. + * \n 1 : Dynamic rate control mode change supported. + */ + NV_ENC_CAPS_SUPPORT_DYN_RCMODE_CHANGE, + + /** + * Indicates Subframe readback support for slice-based encoding. If this feature is supported, it can be enabled by setting enableSubFrameWrite = 1. + * \n 0 : Subframe readback not supported. + * \n 1 : Subframe readback supported. + */ + NV_ENC_CAPS_SUPPORT_SUBFRAME_READBACK, + + /** + * Indicates Constrained Encoding mode support. + * Support added from NvEncodeAPI version 2.0. + * \n 0 : Constrained encoding mode not supported. + * \n 1 : Constrained encoding mode supported. + * If this mode is supported client can enable this during initialization. + * Client can then force a picture to be coded as constrained picture where + * in-loop filtering is disabled across slice boundaries and prediction vectors for inter + * macroblocks in each slice will be restricted to the slice region. + */ + NV_ENC_CAPS_SUPPORT_CONSTRAINED_ENCODING, + + /** + * Indicates Intra Refresh Mode Support. + * Support added from NvEncodeAPI version 2.0. + * \n 0 : Intra Refresh Mode not supported. + * \n 1 : Intra Refresh Mode supported. + */ + NV_ENC_CAPS_SUPPORT_INTRA_REFRESH, + + /** + * Indicates Custom VBV Buffer Size support. It can be used for capping frame size. + * Support added from NvEncodeAPI version 2.0. + * \n 0 : Custom VBV buffer size specification from client, not supported. + * \n 1 : Custom VBV buffer size specification from client, supported. 
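+     * As an illustrative example, a single-frame VBV that caps each frame near
+     * the per-frame bit budget can be approximated (a sketch; the frame rate is
+     * taken from NV_ENC_INITIALIZE_PARAMS::frameRateNum/frameRateDen) as:
+     * \code
+     * rc.vbvBufferSize   = (uint32_t)((uint64_t)rc.averageBitRate * frameRateDen / frameRateNum);
+     * rc.vbvInitialDelay = rc.vbvBufferSize;
+     * \endcode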
+ */ + NV_ENC_CAPS_SUPPORT_CUSTOM_VBV_BUF_SIZE, + + /** + * Indicates Dynamic Slice Mode Support. + * Support added from NvEncodeAPI version 2.0. + * \n 0 : Dynamic Slice Mode not supported. + * \n 1 : Dynamic Slice Mode supported. + */ + NV_ENC_CAPS_SUPPORT_DYNAMIC_SLICE_MODE, + + /** + * Indicates Reference Picture Invalidation Support. + * Support added from NvEncodeAPI version 2.0. + * \n 0 : Reference Picture Invalidation not supported. + * \n 1 : Reference Picture Invalidation supported. + */ + NV_ENC_CAPS_SUPPORT_REF_PIC_INVALIDATION, + + /** + * Indicates support for Pre-Processing. + * The API return value is a bitmask of the values defined in ::NV_ENC_PREPROC_FLAGS + */ + NV_ENC_CAPS_PREPROC_SUPPORT, + + /** + * Indicates support Async mode. + * \n 0 : Async Encode mode not supported. + * \n 1 : Async Encode mode supported. + */ + NV_ENC_CAPS_ASYNC_ENCODE_SUPPORT, + + /** + * Maximum MBs per frame supported. + */ + NV_ENC_CAPS_MB_NUM_MAX, + + /** + * Maximum aggregate throughput in MBs per sec. + */ + NV_ENC_CAPS_MB_PER_SEC_MAX, + + /** + * Indicates HW support for YUV444 mode encoding. + * \n 0 : YUV444 mode encoding not supported. + * \n 1 : YUV444 mode encoding supported. + */ + NV_ENC_CAPS_SUPPORT_YUV444_ENCODE, + + /** + * Indicates HW support for lossless encoding. + * \n 0 : lossless encoding not supported. + * \n 1 : lossless encoding supported. + */ + NV_ENC_CAPS_SUPPORT_LOSSLESS_ENCODE, + + /** + * Indicates HW support for Sample Adaptive Offset. + * \n 0 : SAO not supported. + * \n 1 : SAO encoding supported. + */ + NV_ENC_CAPS_SUPPORT_SAO, + + /** + * Indicates HW support for Motion Estimation Only Mode. + * \n 0 : MEOnly Mode not supported. + * \n 1 : MEOnly Mode supported for I and P frames. + * \n 2 : MEOnly Mode supported for I, P and B frames. + */ + NV_ENC_CAPS_SUPPORT_MEONLY_MODE, + + /** + * Indicates HW support for lookahead encoding (enableLookahead=1). + * \n 0 : Lookahead not supported. + * \n 1 : Lookahead supported. + */ + NV_ENC_CAPS_SUPPORT_LOOKAHEAD, + + /** + * Indicates HW support for temporal AQ encoding (enableTemporalAQ=1). + * \n 0 : Temporal AQ not supported. + * \n 1 : Temporal AQ supported. + */ + NV_ENC_CAPS_SUPPORT_TEMPORAL_AQ, + /** + * Indicates HW support for 10 bit encoding. + * \n 0 : 10 bit encoding not supported. + * \n 1 : 10 bit encoding supported. + */ + NV_ENC_CAPS_SUPPORT_10BIT_ENCODE, + /** + * Maximum number of Long Term Reference frames supported + */ + NV_ENC_CAPS_NUM_MAX_LTR_FRAMES, + + /** + * Indicates HW support for Weighted Prediction. + * \n 0 : Weighted Prediction not supported. + * \n 1 : Weighted Prediction supported. + */ + NV_ENC_CAPS_SUPPORT_WEIGHTED_PREDICTION, + + + /** + * On managed (vGPU) platforms (Windows only), this API, in conjunction with other GRID Management APIs, can be used + * to estimate the residual capacity of the hardware encoder on the GPU as a percentage of the total available encoder capacity. + * This API can be called at any time; i.e. during the encode session or before opening the encode session. + * If the available encoder capacity is returned as zero, applications may choose to switch to software encoding + * and continue to call this API (e.g. polling once per second) until capacity becomes available. + * + * On bare metal (non-virtualized GPU) and linux platforms, this API always returns 100. + */ + NV_ENC_CAPS_DYNAMIC_QUERY_ENCODER_CAPACITY, + + /** + * Indicates B as reference support. + * \n 0 : B as reference is not supported. + * \n 1 : each B-Frame as reference is supported. 
+ * \n 2 : only Middle B-frame as reference is supported. + */ + NV_ENC_CAPS_SUPPORT_BFRAME_REF_MODE, + + /** + * Indicates HW support for Emphasis Level Map based delta QP computation. + * \n 0 : Emphasis Level Map based delta QP not supported. + * \n 1 : Emphasis Level Map based delta QP is supported. + */ + NV_ENC_CAPS_SUPPORT_EMPHASIS_LEVEL_MAP, + + /** + * Minimum input width supported. + */ + NV_ENC_CAPS_WIDTH_MIN, + + /** + * Minimum input height supported. + */ + NV_ENC_CAPS_HEIGHT_MIN, + + /** + * Indicates HW support for multiple reference frames. + */ + NV_ENC_CAPS_SUPPORT_MULTIPLE_REF_FRAMES, + + /** + * Indicates HW support for HEVC with alpha encoding. + * \n 0 : HEVC with alpha encoding not supported. + * \n 1 : HEVC with alpha encoding is supported. + */ + NV_ENC_CAPS_SUPPORT_ALPHA_LAYER_ENCODING, + + /** + * Indicates number of Encoding engines present on GPU. + */ + NV_ENC_CAPS_NUM_ENCODER_ENGINES, + + /** + * Indicates single slice intra refresh support. + */ + NV_ENC_CAPS_SINGLE_SLICE_INTRA_REFRESH, + + /** + * Reserved - Not to be used by clients. + */ + NV_ENC_CAPS_EXPOSED_COUNT +} NV_ENC_CAPS; + +/** + * HEVC CU SIZE + */ +typedef enum _NV_ENC_HEVC_CUSIZE +{ + NV_ENC_HEVC_CUSIZE_AUTOSELECT = 0, + NV_ENC_HEVC_CUSIZE_8x8 = 1, + NV_ENC_HEVC_CUSIZE_16x16 = 2, + NV_ENC_HEVC_CUSIZE_32x32 = 3, + NV_ENC_HEVC_CUSIZE_64x64 = 4, +}NV_ENC_HEVC_CUSIZE; + +/** + * Input struct for querying Encoding capabilities. + */ +typedef struct _NV_ENC_CAPS_PARAM +{ + uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_CAPS_PARAM_VER */ + NV_ENC_CAPS capsToQuery; /**< [in]: Specifies the encode capability to be queried. Client should pass a member for ::NV_ENC_CAPS enum. */ + uint32_t reserved[62]; /**< [in]: Reserved and must be set to 0 */ +} NV_ENC_CAPS_PARAM; + +/** NV_ENC_CAPS_PARAM struct version. */ +#define NV_ENC_CAPS_PARAM_VER NVENCAPI_STRUCT_VERSION(1) + + +/** + * Encoder Output parameters + */ +typedef struct _NV_ENC_ENCODE_OUT_PARAMS +{ + uint32_t version; /**< [out]: Struct version. */ + uint32_t bitstreamSizeInBytes; /**< [out]: Encoded bitstream size in bytes */ + uint32_t reserved[62]; /**< [out]: Reserved and must be set to 0 */ +} NV_ENC_ENCODE_OUT_PARAMS; + +/** NV_ENC_ENCODE_OUT_PARAMS struct version. */ +#define NV_ENC_ENCODE_OUT_PARAMS_VER NVENCAPI_STRUCT_VERSION(1) + +/** + * Creation parameters for input buffer. + */ +typedef struct _NV_ENC_CREATE_INPUT_BUFFER +{ + uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_CREATE_INPUT_BUFFER_VER */ + uint32_t width; /**< [in]: Input frame width */ + uint32_t height; /**< [in]: Input frame height */ + NV_ENC_MEMORY_HEAP memoryHeap; /**< [in]: Deprecated. Do not use */ + NV_ENC_BUFFER_FORMAT bufferFmt; /**< [in]: Input buffer format */ + uint32_t reserved; /**< [in]: Reserved and must be set to 0 */ + NV_ENC_INPUT_PTR inputBuffer; /**< [out]: Pointer to input buffer */ + void* pSysMemBuffer; /**< [in]: Pointer to existing system memory buffer */ + uint32_t reserved1[57]; /**< [in]: Reserved and must be set to 0 */ + void* reserved2[63]; /**< [in]: Reserved and must be set to NULL */ +} NV_ENC_CREATE_INPUT_BUFFER; + +/** NV_ENC_CREATE_INPUT_BUFFER struct version. */ +#define NV_ENC_CREATE_INPUT_BUFFER_VER NVENCAPI_STRUCT_VERSION(1) + +/** + * Creation parameters for output bitstream buffer. + */ +typedef struct _NV_ENC_CREATE_BITSTREAM_BUFFER +{ + uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_CREATE_BITSTREAM_BUFFER_VER */ + uint32_t size; /**< [in]: Deprecated. 
Do not use */ + NV_ENC_MEMORY_HEAP memoryHeap; /**< [in]: Deprecated. Do not use */ + uint32_t reserved; /**< [in]: Reserved and must be set to 0 */ + NV_ENC_OUTPUT_PTR bitstreamBuffer; /**< [out]: Pointer to the output bitstream buffer */ + void* bitstreamBufferPtr; /**< [out]: Reserved and should not be used */ + uint32_t reserved1[58]; /**< [in]: Reserved and should be set to 0 */ + void* reserved2[64]; /**< [in]: Reserved and should be set to NULL */ +} NV_ENC_CREATE_BITSTREAM_BUFFER; + +/** NV_ENC_CREATE_BITSTREAM_BUFFER struct version. */ +#define NV_ENC_CREATE_BITSTREAM_BUFFER_VER NVENCAPI_STRUCT_VERSION(1) + +/** + * Structs needed for ME only mode. + */ +typedef struct _NV_ENC_MVECTOR +{ + int16_t mvx; /**< the x component of MV in quarter-pel units */ + int16_t mvy; /**< the y component of MV in quarter-pel units */ +} NV_ENC_MVECTOR; + +/** + * Motion vector structure per macroblock for H264 motion estimation. + */ +typedef struct _NV_ENC_H264_MV_DATA +{ + NV_ENC_MVECTOR mv[4]; /**< up to 4 vectors for 8x8 partition */ + uint8_t mbType; /**< 0 (I), 1 (P), 2 (IPCM), 3 (B) */ + uint8_t partitionType; /**< Specifies the block partition type. 0:16x16, 1:8x8, 2:16x8, 3:8x16 */ + uint16_t reserved; /**< reserved padding for alignment */ + uint32_t mbCost; +} NV_ENC_H264_MV_DATA; + +/** + * Motion vector structure per CU for HEVC motion estimation. + */ +typedef struct _NV_ENC_HEVC_MV_DATA +{ + NV_ENC_MVECTOR mv[4]; /**< up to 4 vectors within a CU */ + uint8_t cuType; /**< 0 (I), 1(P) */ + uint8_t cuSize; /**< 0: 8x8, 1: 16x16, 2: 32x32, 3: 64x64 */ + uint8_t partitionMode; /**< The CU partition mode + 0 (2Nx2N), 1 (2NxN), 2(Nx2N), 3 (NxN), + 4 (2NxnU), 5 (2NxnD), 6(nLx2N), 7 (nRx2N) */ + uint8_t lastCUInCTB; /**< Marker to separate CUs in the current CTB from CUs in the next CTB */ +} NV_ENC_HEVC_MV_DATA; + +/** + * Creation parameters for output motion vector buffer for ME only mode. + */ +typedef struct _NV_ENC_CREATE_MV_BUFFER +{ + uint32_t version; /**< [in]: Struct version. Must be set to NV_ENC_CREATE_MV_BUFFER_VER */ + NV_ENC_OUTPUT_PTR mvBuffer; /**< [out]: Pointer to the output motion vector buffer */ + uint32_t reserved1[255]; /**< [in]: Reserved and should be set to 0 */ + void* reserved2[63]; /**< [in]: Reserved and should be set to NULL */ +} NV_ENC_CREATE_MV_BUFFER; + +/** NV_ENC_CREATE_MV_BUFFER struct version*/ +#define NV_ENC_CREATE_MV_BUFFER_VER NVENCAPI_STRUCT_VERSION(1) + +/** + * QP value for frames + */ +typedef struct _NV_ENC_QP +{ + uint32_t qpInterP; /**< [in]: Specifies QP value for P-frame. Even though this field is uint32_t for legacy reasons, the client should treat this as a signed parameter(int32_t) for cases in which negative QP values are to be specified. */ + uint32_t qpInterB; /**< [in]: Specifies QP value for B-frame. Even though this field is uint32_t for legacy reasons, the client should treat this as a signed parameter(int32_t) for cases in which negative QP values are to be specified. */ + uint32_t qpIntra; /**< [in]: Specifies QP value for Intra Frame. Even though this field is uint32_t for legacy reasons, the client should treat this as a signed parameter(int32_t) for cases in which negative QP values are to be specified. */ +} NV_ENC_QP; + +/** + * Rate Control Configuration Parameters + */ + typedef struct _NV_ENC_RC_PARAMS + { + uint32_t version; + NV_ENC_PARAMS_RC_MODE rateControlMode; /**< [in]: Specifies the rate control mode. Check support for various rate control modes using ::NV_ENC_CAPS_SUPPORTED_RATECONTROL_MODES caps. 
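+                                                      For instance, an illustrative CBR sketch using fields of this struct:
+                                                        rc.rateControlMode = NV_ENC_PARAMS_RC_CBR;
+                                                        rc.averageBitRate  = 6000000;  // 6 Mbit/s
+                                                        rc.multiPass       = NV_ENC_TWO_PASS_QUARTER_RESOLUTION;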
*/ + NV_ENC_QP constQP; /**< [in]: Specifies the initial QP to be used for encoding, these values would be used for all frames if in Constant QP mode. */ + uint32_t averageBitRate; /**< [in]: Specifies the average bitrate(in bits/sec) used for encoding. */ + uint32_t maxBitRate; /**< [in]: Specifies the maximum bitrate for the encoded output. This is used for VBR and ignored for CBR mode. */ + uint32_t vbvBufferSize; /**< [in]: Specifies the VBV(HRD) buffer size. in bits. Set 0 to use the default VBV buffer size. */ + uint32_t vbvInitialDelay; /**< [in]: Specifies the VBV(HRD) initial delay in bits. Set 0 to use the default VBV initial delay .*/ + uint32_t enableMinQP :1; /**< [in]: Set this to 1 if minimum QP used for rate control. */ + uint32_t enableMaxQP :1; /**< [in]: Set this to 1 if maximum QP used for rate control. */ + uint32_t enableInitialRCQP :1; /**< [in]: Set this to 1 if user supplied initial QP is used for rate control. */ + uint32_t enableAQ :1; /**< [in]: Set this to 1 to enable adaptive quantization (Spatial). */ + uint32_t reservedBitField1 :1; /**< [in]: Reserved bitfields and must be set to 0. */ + uint32_t enableLookahead :1; /**< [in]: Set this to 1 to enable lookahead with depth (if lookahead is enabled, input frames must remain available to the encoder until encode completion) */ + uint32_t disableIadapt :1; /**< [in]: Set this to 1 to disable adaptive I-frame insertion at scene cuts (only has an effect when lookahead is enabled) */ + uint32_t disableBadapt :1; /**< [in]: Set this to 1 to disable adaptive B-frame decision (only has an effect when lookahead is enabled) */ + uint32_t enableTemporalAQ :1; /**< [in]: Set this to 1 to enable temporal AQ */ + uint32_t zeroReorderDelay :1; /**< [in]: Set this to 1 to indicate zero latency operation (no reordering delay, num_reorder_frames=0) */ + uint32_t enableNonRefP :1; /**< [in]: Set this to 1 to enable automatic insertion of non-reference P-frames (no effect if enablePTD=0) */ + uint32_t strictGOPTarget :1; /**< [in]: Set this to 1 to minimize GOP-to-GOP rate fluctuations */ + uint32_t aqStrength :4; /**< [in]: When AQ (Spatial) is enabled (i.e. NV_ENC_RC_PARAMS::enableAQ is set), this field is used to specify AQ strength. AQ strength scale is from 1 (low) - 15 (aggressive). + If not set, strength is auto selected by driver. */ + uint32_t reservedBitFields :16; /**< [in]: Reserved bitfields and must be set to 0 */ + NV_ENC_QP minQP; /**< [in]: Specifies the minimum QP used for rate control. Client must set NV_ENC_CONFIG::enableMinQP to 1. */ + NV_ENC_QP maxQP; /**< [in]: Specifies the maximum QP used for rate control. Client must set NV_ENC_CONFIG::enableMaxQP to 1. */ + NV_ENC_QP initialRCQP; /**< [in]: Specifies the initial QP used for rate control. Client must set NV_ENC_CONFIG::enableInitialRCQP to 1. */ + uint32_t temporallayerIdxMask; /**< [in]: Specifies the temporal layers (as a bitmask) whose QPs have changed. Valid max bitmask is [2^NV_ENC_CAPS_NUM_MAX_TEMPORAL_LAYERS - 1]. + Applicable only for constant QP mode (NV_ENC_RC_PARAMS::rateControlMode = NV_ENC_PARAMS_RC_CONSTQP). */ + uint8_t temporalLayerQP[8]; /**< [in]: Specifies the temporal layer QPs used for rate control. Temporal layer index is used as the array index. + Applicable only for constant QP mode (NV_ENC_RC_PARAMS::rateControlMode = NV_ENC_PARAMS_RC_CONSTQP). 
*/ + uint8_t targetQuality; /**< [in]: Target CQ (Constant Quality) level for VBR mode (range 0-51 with 0-automatic) */ + uint8_t targetQualityLSB; /**< [in]: Fractional part of target quality (as 8.8 fixed point format) */ + uint16_t lookaheadDepth; /**< [in]: Maximum depth of lookahead with range 0-(31 - number of B frames). + lookaheadDepth is only used if enableLookahead=1.*/ + uint8_t lowDelayKeyFrameScale; /**< [in]: Specifies the ratio of I frame bits to P frame bits in case of single frame VBV and CBR rate control mode, + is set to 2 by default for low latency tuning info and 1 by default for ultra low latency tuning info */ + uint8_t reserved1[3]; + NV_ENC_QP_MAP_MODE qpMapMode; /**< [in]: This flag is used to interpret values in array specified by NV_ENC_PIC_PARAMS::qpDeltaMap. + Set this to NV_ENC_QP_MAP_EMPHASIS to treat values specified by NV_ENC_PIC_PARAMS::qpDeltaMap as Emphasis Level Map. + Emphasis Level can be assigned any value specified in enum NV_ENC_EMPHASIS_MAP_LEVEL. + Emphasis Level Map is used to specify regions to be encoded at varying levels of quality. + The hardware encoder adjusts the quantization within the image as per the provided emphasis map, + by adjusting the quantization parameter (QP) assigned to each macroblock. This adjustment is commonly called “Delta QP”. + The adjustment depends on the absolute QP decided by the rate control algorithm, and is applied after the rate control has decided each macroblock’s QP. + Since the Delta QP overrides rate control, enabling Emphasis Level Map may violate bitrate and VBV buffer size constraints. + Emphasis Level Map is useful in situations where client has a priori knowledge of the image complexity (e.g. via use of NVFBC's Classification feature) and encoding those high-complexity areas at higher quality (lower QP) is important, even at the possible cost of violating bitrate/VBV buffer size constraints + This feature is not supported when AQ( Spatial/Temporal) is enabled. + This feature is only supported for H264 codec currently. + + Set this to NV_ENC_QP_MAP_DELTA to treat values specified by NV_ENC_PIC_PARAMS::qpDeltaMap as QP Delta. This specifies QP modifier to be applied on top of the QP chosen by rate control + + Set this to NV_ENC_QP_MAP_DISABLED to ignore NV_ENC_PIC_PARAMS::qpDeltaMap values. In this case, qpDeltaMap should be set to NULL. + + Other values are reserved for future use.*/ + NV_ENC_MULTI_PASS multiPass; /**< [in]: This flag is used to enable multi-pass encoding for a given ::NV_ENC_PARAMS_RC_MODE. This flag is not valid for H264 and HEVC MEOnly mode */ + uint32_t alphaLayerBitrateRatio; /**< [in]: Specifies the ratio in which bitrate should be split between base and alpha layer. A value 'x' for this field will split the target bitrate in a ratio of x : 1 between base and alpha layer. 
+ The default split ratio is 15.*/ + int8_t cbQPIndexOffset; /**< [in]: Specifies the value of 'chroma_qp_index_offset' in H264 / 'pps_cb_qp_offset' in HEVC.*/ + int8_t crQPIndexOffset; /**< [in]: Specifies the value of 'second_chroma_qp_index_offset' in H264 / 'pps_cr_qp_offset' in HEVC.*/ + uint16_t reserved2; + uint32_t reserved[4]; + } NV_ENC_RC_PARAMS; + +/** macro for constructing the version field of ::_NV_ENC_RC_PARAMS */ +#define NV_ENC_RC_PARAMS_VER NVENCAPI_STRUCT_VERSION(1) + + + +/** + * \struct _NV_ENC_CONFIG_H264_VUI_PARAMETERS + * H264 Video Usability Info parameters + */ +typedef struct _NV_ENC_CONFIG_H264_VUI_PARAMETERS +{ + uint32_t overscanInfoPresentFlag; /**< [in]: if set to 1 , it specifies that the overscanInfo is present */ + uint32_t overscanInfo; /**< [in]: Specifies the overscan info(as defined in Annex E of the ITU-T Specification). */ + uint32_t videoSignalTypePresentFlag; /**< [in]: If set to 1, it specifies that the videoFormat, videoFullRangeFlag and colourDescriptionPresentFlag are present. */ + uint32_t videoFormat; /**< [in]: Specifies the source video format(as defined in Annex E of the ITU-T Specification).*/ + uint32_t videoFullRangeFlag; /**< [in]: Specifies the output range of the luma and chroma samples(as defined in Annex E of the ITU-T Specification). */ + uint32_t colourDescriptionPresentFlag; /**< [in]: If set to 1, it specifies that the colourPrimaries, transferCharacteristics and colourMatrix are present. */ + uint32_t colourPrimaries; /**< [in]: Specifies color primaries for converting to RGB(as defined in Annex E of the ITU-T Specification) */ + uint32_t transferCharacteristics; /**< [in]: Specifies the opto-electronic transfer characteristics to use (as defined in Annex E of the ITU-T Specification) */ + uint32_t colourMatrix; /**< [in]: Specifies the matrix coefficients used in deriving the luma and chroma from the RGB primaries (as defined in Annex E of the ITU-T Specification). */ + uint32_t chromaSampleLocationFlag; /**< [in]: if set to 1 , it specifies that the chromaSampleLocationTop and chromaSampleLocationBot are present.*/ + uint32_t chromaSampleLocationTop; /**< [in]: Specifies the chroma sample location for top field(as defined in Annex E of the ITU-T Specification) */ + uint32_t chromaSampleLocationBot; /**< [in]: Specifies the chroma sample location for bottom field(as defined in Annex E of the ITU-T Specification) */ + uint32_t bitstreamRestrictionFlag; /**< [in]: if set to 1, it specifies the bitstream restriction parameters are present in the bitstream.*/ + uint32_t reserved[15]; +}NV_ENC_CONFIG_H264_VUI_PARAMETERS; + +typedef NV_ENC_CONFIG_H264_VUI_PARAMETERS NV_ENC_CONFIG_HEVC_VUI_PARAMETERS; + +/** + * \struct _NVENC_EXTERNAL_ME_HINT_COUNTS_PER_BLOCKTYPE + * External motion vector hint counts per block type. + * H264 supports multiple hint while HEVC supports one hint for each valid candidate. + */ +typedef struct _NVENC_EXTERNAL_ME_HINT_COUNTS_PER_BLOCKTYPE +{ + uint32_t numCandsPerBlk16x16 : 4; /**< [in]: Supported for H264, HEVC. It Specifies the number of candidates per 16x16 block. */ + uint32_t numCandsPerBlk16x8 : 4; /**< [in]: Supported for H264 only. Specifies the number of candidates per 16x8 block. */ + uint32_t numCandsPerBlk8x16 : 4; /**< [in]: Supported for H264 only. Specifies the number of candidates per 8x16 block. */ + uint32_t numCandsPerBlk8x8 : 4; /**< [in]: Supported for H264, HEVC. Specifies the number of candidates per 8x8 block. */ + uint32_t reserved : 16; /**< [in]: Reserved for padding. 
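+                                             (Illustrative sketch for the struct above: one candidate per 16x16 and per
+                                             8x8 block would set numCandsPerBlk16x16 = 1 and numCandsPerBlk8x8 = 1,
+                                             leaving the H264-only 16x8/8x16 counts at zero when encoding HEVC.)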
*/
+ uint32_t reserved1[3]; /**< [in]: Reserved for future use. */
+} NVENC_EXTERNAL_ME_HINT_COUNTS_PER_BLOCKTYPE;
+
+
+/**
+ * \struct _NVENC_EXTERNAL_ME_HINT
+ * External Motion Vector hint structure for H264 and HEVC.
+ */
+typedef struct _NVENC_EXTERNAL_ME_HINT
+{
+ int32_t mvx : 12; /**< [in]: Specifies the x component of integer pixel MV (relative to current MB) S12.0. */
+ int32_t mvy : 10; /**< [in]: Specifies the y component of integer pixel MV (relative to current MB) S10.0. */
+ int32_t refidx : 5; /**< [in]: Specifies the reference index (31=invalid). Currently we support only 1 reference frame per direction for external hints, so \p refidx must be 0. */
+ int32_t dir : 1; /**< [in]: Specifies the direction of motion estimation. 0=L0, 1=L1.*/
+ int32_t partType : 2; /**< [in]: Specifies the block partition type. 0=16x16, 1=16x8, 2=8x16, 3=8x8 (blocks in partition must be consecutive).*/
+ int32_t lastofPart : 1; /**< [in]: Set to 1 for the last MV of (sub) partition */
+ int32_t lastOfMB : 1; /**< [in]: Set to 1 for the last MV of macroblock. */
+} NVENC_EXTERNAL_ME_HINT;
+
+
+/**
+ * \struct _NV_ENC_CONFIG_H264
+ * H264 encoder configuration parameters
+ */
+typedef struct _NV_ENC_CONFIG_H264
+{
+ uint32_t enableTemporalSVC :1; /**< [in]: Set to 1 to enable temporal SVC */
+ uint32_t enableStereoMVC :1; /**< [in]: Set to 1 to enable stereo MVC*/
+ uint32_t hierarchicalPFrames :1; /**< [in]: Set to 1 to enable hierarchical P Frames */
+ uint32_t hierarchicalBFrames :1; /**< [in]: Set to 1 to enable hierarchical B Frames */
+ uint32_t outputBufferingPeriodSEI :1; /**< [in]: Set to 1 to write SEI buffering period syntax in the bitstream */
+ uint32_t outputPictureTimingSEI :1; /**< [in]: Set to 1 to write SEI picture timing syntax in the bitstream. When set for the following rateControlMode: NV_ENC_PARAMS_RC_CBR, NV_ENC_PARAMS_RC_CBR_LOWDELAY_HQ,
+ NV_ENC_PARAMS_RC_CBR_HQ, filler data is inserted if needed to achieve HRD bitrate */
+ uint32_t outputAUD :1; /**< [in]: Set to 1 to write access unit delimiter syntax in bitstream */
+ uint32_t disableSPSPPS :1; /**< [in]: Set to 1 to disable writing of Sequence and Picture parameter info in bitstream */
+ uint32_t outputFramePackingSEI :1; /**< [in]: Set to 1 to enable writing of frame packing arrangement SEI messages to bitstream */
+ uint32_t outputRecoveryPointSEI :1; /**< [in]: Set to 1 to enable writing of recovery point SEI message */
+ uint32_t enableIntraRefresh :1; /**< [in]: Set to 1 to enable gradual decoder refresh or intra refresh. If the GOP structure uses B frames this will be ignored */
+ uint32_t enableConstrainedEncoding :1; /**< [in]: Set this to 1 to enable constrainedFrame encoding where each slice in the constrained picture is independent of other slices.
+ Constrained encoding works only with rectangular slices.
+ Check support for constrained encoding using ::NV_ENC_CAPS_SUPPORT_CONSTRAINED_ENCODING caps. */
+ uint32_t repeatSPSPPS :1; /**< [in]: Set to 1 to enable writing of Sequence and Picture parameter for every IDR frame */
+ uint32_t enableVFR :1; /**< [in]: Setting enableVFR=1 currently only sets the fixed_frame_rate_flag=0 in the VUI but otherwise
+ has no impact on the encoder behavior. For more details please refer to E.1 VUI syntax of H.264 standard. Note, however, that NVENC does not support VFR encoding and rate control. */
+ uint32_t enableLTR :1; /**< [in]: Set to 1 to enable LTR (Long Term Reference) frame support. LTR can be used in two modes: "LTR Trust" mode and "LTR Per Picture" mode. 
+ LTR Trust mode: In this mode, ltrNumFrames pictures after IDR are automatically marked as LTR. This mode is enabled by setting ltrTrustMode = 1.
+ Use of LTR Trust mode is strongly discouraged as this mode may be deprecated in the future.
+ LTR Per Picture mode: In this mode, the client can control whether the current picture should be marked as LTR. Enable this mode by setting
+ ltrTrustMode = 0 and ltrMarkFrame = 1 for the picture to be marked as LTR. This is the preferred mode
+ for using LTR.
+ Note that LTRs are not supported if the encoding session is configured with B-frames */
+ uint32_t qpPrimeYZeroTransformBypassFlag :1; /**< [in]: To enable lossless encode set this to 1, set QP to 0 and RC_mode to NV_ENC_PARAMS_RC_CONSTQP and profile to HIGH_444_PREDICTIVE_PROFILE.
+ Check support for lossless encoding using ::NV_ENC_CAPS_SUPPORT_LOSSLESS_ENCODE caps. */
+ uint32_t useConstrainedIntraPred :1; /**< [in]: Set 1 to enable constrained intra prediction. */
+ uint32_t enableFillerDataInsertion :1; /**< [in]: Set to 1 to enable insertion of filler data in the bitstream.
+ This flag will take effect only when one of the CBR rate
+ control modes (NV_ENC_PARAMS_RC_CBR, NV_ENC_PARAMS_RC_CBR_HQ,
+ NV_ENC_PARAMS_RC_CBR_LOWDELAY_HQ) is in use and both
+ NV_ENC_INITIALIZE_PARAMS::frameRateNum and
+ NV_ENC_INITIALIZE_PARAMS::frameRateDen are set to non-zero
+ values. Setting this field when
+ NV_ENC_INITIALIZE_PARAMS::enableOutputInVidmem is also set
+ is currently not supported and will make ::NvEncInitializeEncoder()
+ return an error. */
+ uint32_t disableSVCPrefixNalu :1; /**< [in]: Set to 1 to disable writing of SVC Prefix NALU preceding each slice in bitstream.
+ Applicable only when temporal SVC is enabled (NV_ENC_CONFIG_H264::enableTemporalSVC = 1). */
+ uint32_t enableScalabilityInfoSEI :1; /**< [in]: Set to 1 to enable writing of Scalability Information SEI message preceding each IDR picture in bitstream.
+ Applicable only when temporal SVC is enabled (NV_ENC_CONFIG_H264::enableTemporalSVC = 1). */
+ uint32_t singleSliceIntraRefresh : 1; /**< [in]: Set to 1 to maintain single slice in frames during intra refresh.
+ Check support for single slice intra refresh using ::NV_ENC_CAPS_SINGLE_SLICE_INTRA_REFRESH caps.
+ This flag will be ignored if the value returned for ::NV_ENC_CAPS_SINGLE_SLICE_INTRA_REFRESH caps is false. */
+ uint32_t reservedBitFields : 11; /**< [in]: Reserved bitfields and must be set to 0 */
+ uint32_t level; /**< [in]: Specifies the encoding level. Client is recommended to set this to NV_ENC_LEVEL_AUTOSELECT in order to enable the NvEncodeAPI interface to select the correct level. */
+ uint32_t idrPeriod; /**< [in]: Specifies the IDR interval. If not set, this is made equal to gopLength in NV_ENC_CONFIG. Low-latency applications can set the IDR interval to NVENC_INFINITE_GOPLENGTH so that IDR frames are not inserted automatically. */
+ uint32_t separateColourPlaneFlag; /**< [in]: Set to 1 to enable 4:4:4 separate colour planes */
+ uint32_t disableDeblockingFilterIDC; /**< [in]: Specifies the deblocking filter mode. Permissible value range: [0,2]. This flag corresponds
+ to the flag disable_deblocking_filter_idc specified in section 7.4.3 of H.264 specification,
+ which specifies whether the operation of the deblocking filter shall be disabled across some
+ block edges of the slice and specifies for which edges the filtering is disabled. 
See section
+ 7.4.3 of H.264 specification for more details.*/
+ uint32_t numTemporalLayers; /**< [in]: Specifies number of temporal layers to be used for hierarchical coding / temporal SVC. Valid value range is [1,::NV_ENC_CAPS_NUM_MAX_TEMPORAL_LAYERS] */
+ uint32_t spsId; /**< [in]: Specifies the SPS id of the sequence header */
+ uint32_t ppsId; /**< [in]: Specifies the PPS id of the picture header */
+ NV_ENC_H264_ADAPTIVE_TRANSFORM_MODE adaptiveTransformMode; /**< [in]: Specifies the AdaptiveTransform Mode. Check support for AdaptiveTransform mode using ::NV_ENC_CAPS_SUPPORT_ADAPTIVE_TRANSFORM caps. */
+ NV_ENC_H264_FMO_MODE fmoMode; /**< [in]: Specifies the FMO mode. Check support for FMO using ::NV_ENC_CAPS_SUPPORT_FMO caps. */
+ NV_ENC_H264_BDIRECT_MODE bdirectMode; /**< [in]: Specifies the BDirect mode. Check support for BDirect mode using ::NV_ENC_CAPS_SUPPORT_BDIRECT_MODE caps.*/
+ NV_ENC_H264_ENTROPY_CODING_MODE entropyCodingMode; /**< [in]: Specifies the entropy coding mode. Check support for CABAC mode using ::NV_ENC_CAPS_SUPPORT_CABAC caps. */
+ NV_ENC_STEREO_PACKING_MODE stereoMode; /**< [in]: Specifies the stereo frame packing mode which is to be signaled in frame packing arrangement SEI */
+ uint32_t intraRefreshPeriod; /**< [in]: Specifies the interval between successive intra refresh periods. Requires enableIntraRefresh to be set.
+ Will be disabled if NV_ENC_CONFIG::gopLength is not set to NVENC_INFINITE_GOPLENGTH. */
+ uint32_t intraRefreshCnt; /**< [in]: Specifies the length of intra refresh in number of frames for periodic intra refresh. This value should be smaller than intraRefreshPeriod */
+ uint32_t maxNumRefFrames; /**< [in]: Specifies the DPB size used for encoding. Setting it to 0 will let driver use the default DPB size.
+ A low-latency application that wants to invalidate reference frames as an error-resilience tool
+ is recommended to use a large DPB size so that the encoder can keep old reference frames which can be used if recent
+ frames are invalidated. */
+ uint32_t sliceMode; /**< [in]: This parameter in conjunction with sliceModeData specifies the way in which the picture is divided into slices
+ sliceMode = 0 MB based slices, sliceMode = 1 Byte based slices, sliceMode = 2 MB row based slices, sliceMode = 3 numSlices in Picture.
+ When forceIntraRefreshWithFrameCnt is set it will have priority over sliceMode setting
+ When sliceMode == 0 and sliceModeData == 0 whole picture will be coded with one slice */
+ uint32_t sliceModeData; /**< [in]: Specifies the parameter needed for sliceMode. For:
+ sliceMode = 0, sliceModeData specifies # of MBs in each slice (except last slice)
+ sliceMode = 1, sliceModeData specifies maximum # of bytes in each slice (except last slice)
+ sliceMode = 2, sliceModeData specifies # of MB rows in each slice (except last slice)
+ sliceMode = 3, sliceModeData specifies number of slices in the picture. Driver will divide picture into slices optimally */
+ NV_ENC_CONFIG_H264_VUI_PARAMETERS h264VUIParameters; /**< [in]: Specifies the H264 video usability info parameters */
+ uint32_t ltrNumFrames; /**< [in]: Specifies the number of LTR frames. This parameter has a different meaning in the two LTR modes.
+ In "LTR Trust" mode (ltrTrustMode = 1), encoder will mark the first ltrNumFrames base layer reference frames within each IDR interval as LTR.
+ In "LTR Per Picture" mode (ltrTrustMode = 0 and ltrMarkFrame = 1), ltrNumFrames specifies maximum number of LTR frames in DPB. 
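+ As an illustration only (example values, not defaults), "LTR Per Picture" mode might be configured at session setup as:
+ \code
+ NV_ENC_CONFIG_H264 h264 = { 0 };
+ h264.enableLTR = 1; // enable LTR support
+ h264.ltrTrustMode = 0; // "LTR Per Picture" mode (preferred)
+ h264.ltrNumFrames = 2; // allow at most two LTR frames in the DPB
+ \endcode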
*/
+ uint32_t ltrTrustMode; /**< [in]: Specifies the LTR operating mode. See comments near NV_ENC_CONFIG_H264::enableLTR for description of the two modes.
+ Set to 1 to use "LTR Trust" mode of LTR operation. Clients are discouraged from using "LTR Trust" mode as this mode may
+ be deprecated in future releases.
+ Set to 0 when using "LTR Per Picture" mode of LTR operation. */
+ uint32_t chromaFormatIDC; /**< [in]: Specifies the chroma format. Should be set to 1 for yuv420 input, 3 for yuv444 input.
+ Check support for YUV444 encoding using ::NV_ENC_CAPS_SUPPORT_YUV444_ENCODE caps.*/
+ uint32_t maxTemporalLayers; /**< [in]: Specifies the maximum temporal layer used for temporal SVC / hierarchical coding.
+ Default value of this field is NV_ENC_CAPS::NV_ENC_CAPS_NUM_MAX_TEMPORAL_LAYERS. Note that the value NV_ENC_CONFIG_H264::maxNumRefFrames should
+ be greater than or equal to (NV_ENC_CONFIG_H264::maxTemporalLayers - 2) * 2, for NV_ENC_CONFIG_H264::maxTemporalLayers >= 2.*/
+ NV_ENC_BFRAME_REF_MODE useBFramesAsRef; /**< [in]: Specifies the B-Frame as reference mode. Check support for useBFramesAsRef mode using ::NV_ENC_CAPS_SUPPORT_BFRAME_REF_MODE caps.*/
+ NV_ENC_NUM_REF_FRAMES numRefL0; /**< [in]: Specifies max number of reference frames in reference picture list L0, that can be used by hardware for prediction of a frame.
+ Check support for numRefL0 using ::NV_ENC_CAPS_SUPPORT_MULTIPLE_REF_FRAMES caps. */
+ NV_ENC_NUM_REF_FRAMES numRefL1; /**< [in]: Specifies max number of reference frames in reference picture list L1, that can be used by hardware for prediction of a frame.
+ Check support for numRefL1 using ::NV_ENC_CAPS_SUPPORT_MULTIPLE_REF_FRAMES caps. */
+ uint32_t reserved1[267]; /**< [in]: Reserved and must be set to 0 */
+ void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_CONFIG_H264;
+
+/**
+ * \struct _NV_ENC_CONFIG_HEVC
+ * HEVC encoder configuration parameters to be set during initialization.
+ */
+typedef struct _NV_ENC_CONFIG_HEVC
+{
+ uint32_t level; /**< [in]: Specifies the level of the encoded bitstream.*/
+ uint32_t tier; /**< [in]: Specifies the level tier of the encoded bitstream.*/
+ NV_ENC_HEVC_CUSIZE minCUSize; /**< [in]: Specifies the minimum size of luma coding unit.*/
+ NV_ENC_HEVC_CUSIZE maxCUSize; /**< [in]: Specifies the maximum size of luma coding unit. Currently NVENC SDK only supports maxCUSize equal to NV_ENC_HEVC_CUSIZE_32x32.*/
+ uint32_t useConstrainedIntraPred :1; /**< [in]: Set 1 to enable constrained intra prediction. */
+ uint32_t disableDeblockAcrossSliceBoundary :1; /**< [in]: Set 1 to disable in loop filtering across slice boundary.*/
+ uint32_t outputBufferingPeriodSEI :1; /**< [in]: Set 1 to write SEI buffering period syntax in the bitstream */
+ uint32_t outputPictureTimingSEI :1; /**< [in]: Set 1 to write SEI picture timing syntax in the bitstream */
+ uint32_t outputAUD :1; /**< [in]: Set 1 to write Access Unit Delimiter syntax. */
+ uint32_t enableLTR :1; /**< [in]: Set to 1 to enable LTR (Long Term Reference) frame support. LTR can be used in two modes: "LTR Trust" mode and "LTR Per Picture" mode.
+ LTR Trust mode: In this mode, ltrNumFrames pictures after IDR are automatically marked as LTR. This mode is enabled by setting ltrTrustMode = 1.
+ Use of LTR Trust mode is strongly discouraged as this mode may be deprecated in future releases.
+ LTR Per Picture mode: In this mode, the client can control whether the current picture should be marked as LTR. 
Enable this mode by setting
+ ltrTrustMode = 0 and ltrMarkFrame = 1 for the picture to be marked as LTR. This is the preferred mode
+ for using LTR.
+ Note that LTRs are not supported if the encoding session is configured with B-frames */
+ uint32_t disableSPSPPS :1; /**< [in]: Set 1 to disable VPS, SPS and PPS signaling in the bitstream. */
+ uint32_t repeatSPSPPS :1; /**< [in]: Set 1 to output VPS, SPS and PPS for every IDR frame.*/
+ uint32_t enableIntraRefresh :1; /**< [in]: Set 1 to enable gradual decoder refresh or intra refresh. If the GOP structure uses B frames this will be ignored */
+ uint32_t chromaFormatIDC :2; /**< [in]: Specifies the chroma format. Should be set to 1 for yuv420 input, 3 for yuv444 input.*/
+ uint32_t pixelBitDepthMinus8 :3; /**< [in]: Specifies pixel bit depth minus 8. Should be set to 0 for 8 bit input, 2 for 10 bit input.*/
+ uint32_t enableFillerDataInsertion :1; /**< [in]: Set to 1 to enable insertion of filler data in the bitstream.
+ This flag will take effect only when one of the CBR rate
+ control modes (NV_ENC_PARAMS_RC_CBR, NV_ENC_PARAMS_RC_CBR_HQ,
+ NV_ENC_PARAMS_RC_CBR_LOWDELAY_HQ) is in use and both
+ NV_ENC_INITIALIZE_PARAMS::frameRateNum and
+ NV_ENC_INITIALIZE_PARAMS::frameRateDen are set to non-zero
+ values. Setting this field when
+ NV_ENC_INITIALIZE_PARAMS::enableOutputInVidmem is also set
+ is currently not supported and will make ::NvEncInitializeEncoder()
+ return an error. */
+ uint32_t enableConstrainedEncoding :1; /**< [in]: Set this to 1 to enable constrainedFrame encoding where each slice in the constrained picture is independent of other slices.
+ Constrained encoding works only with rectangular slices.
+ Check support for constrained encoding using ::NV_ENC_CAPS_SUPPORT_CONSTRAINED_ENCODING caps. */
+ uint32_t enableAlphaLayerEncoding :1; /**< [in]: Set this to 1 to enable HEVC encode with alpha layer. */
+ uint32_t singleSliceIntraRefresh : 1; /**< [in]: Set this to 1 to maintain single slice frames during intra refresh.
+ Check support for single slice intra refresh using ::NV_ENC_CAPS_SINGLE_SLICE_INTRA_REFRESH caps.
+ This flag will be ignored if the value returned for ::NV_ENC_CAPS_SINGLE_SLICE_INTRA_REFRESH caps is false. */
+ uint32_t reserved : 14; /**< [in]: Reserved bitfields.*/
+ uint32_t idrPeriod; /**< [in]: Specifies the IDR interval. If not set, this is made equal to gopLength in NV_ENC_CONFIG. Low-latency applications can set the IDR interval to NVENC_INFINITE_GOPLENGTH so that IDR frames are not inserted automatically. */
+ uint32_t intraRefreshPeriod; /**< [in]: Specifies the interval between successive intra refresh periods. Requires enableIntraRefresh to be set.
+ Will be disabled if NV_ENC_CONFIG::gopLength is not set to NVENC_INFINITE_GOPLENGTH. */
+ uint32_t intraRefreshCnt; /**< [in]: Specifies the length of intra refresh in number of frames for periodic intra refresh. This value should be smaller than intraRefreshPeriod */
+ uint32_t maxNumRefFramesInDPB; /**< [in]: Specifies the maximum number of reference frames in the DPB.*/
+ uint32_t ltrNumFrames; /**< [in]: This parameter has a different meaning in the two LTR modes.
+ In "LTR Trust" mode (ltrTrustMode = 1), encoder will mark the first ltrNumFrames base layer reference frames within each IDR interval as LTR.
+ In "LTR Per Picture" mode (ltrTrustMode = 0 and ltrMarkFrame = 1), ltrNumFrames specifies maximum number of LTR frames in DPB. 
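+ For illustration, once LTR is configured a picture can be marked, and later used, through the per-frame parameters; a sketch with arbitrary example values:
+ \code
+ NV_ENC_PIC_PARAMS_HEVC pic = { 0 };
+ pic.ltrMarkFrame = 1; // mark the current picture as LTR
+ pic.ltrMarkFrameIdx = 0; // LTR index to assign
+ // ... and on a later frame, predict from that LTR instead:
+ pic.ltrUseFrames = 1;
+ pic.ltrUseFrameBitmap = 1 << 0; // bitmap of LTR indices to use
+ \endcode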
*/ + uint32_t vpsId; /**< [in]: Specifies the VPS id of the video parameter set */ + uint32_t spsId; /**< [in]: Specifies the SPS id of the sequence header */ + uint32_t ppsId; /**< [in]: Specifies the PPS id of the picture header */ + uint32_t sliceMode; /**< [in]: This parameter in conjunction with sliceModeData specifies the way in which the picture is divided into slices + sliceMode = 0 CTU based slices, sliceMode = 1 Byte based slices, sliceMode = 2 CTU row based slices, sliceMode = 3, numSlices in Picture + When sliceMode == 0 and sliceModeData == 0 whole picture will be coded with one slice */ + uint32_t sliceModeData; /**< [in]: Specifies the parameter needed for sliceMode. For: + sliceMode = 0, sliceModeData specifies # of CTUs in each slice (except last slice) + sliceMode = 1, sliceModeData specifies maximum # of bytes in each slice (except last slice) + sliceMode = 2, sliceModeData specifies # of CTU rows in each slice (except last slice) + sliceMode = 3, sliceModeData specifies number of slices in the picture. Driver will divide picture into slices optimally */ + uint32_t maxTemporalLayersMinus1; /**< [in]: Specifies the max temporal layer used for hierarchical coding. */ + NV_ENC_CONFIG_HEVC_VUI_PARAMETERS hevcVUIParameters; /**< [in]: Specifies the HEVC video usability info parameters */ + uint32_t ltrTrustMode; /**< [in]: Specifies the LTR operating mode. See comments near NV_ENC_CONFIG_HEVC::enableLTR for description of the two modes. + Set to 1 to use "LTR Trust" mode of LTR operation. Clients are discouraged to use "LTR Trust" mode as this mode may + be deprecated in future releases. + Set to 0 when using "LTR Per Picture" mode of LTR operation. */ + NV_ENC_BFRAME_REF_MODE useBFramesAsRef; /**< [in]: Specifies the B-Frame as reference mode. Check support for useBFramesAsRef mode using ::NV_ENC_CAPS_SUPPORT_BFRAME_REF_MODE caps.*/ + NV_ENC_NUM_REF_FRAMES numRefL0; /**< [in]: Specifies max number of reference frames in reference picture list L0, that can be used by hardware for prediction of a frame. + Check support for numRefL0 using ::NV_ENC_CAPS_SUPPORT_MULTIPLE_REF_FRAMES caps. */ + NV_ENC_NUM_REF_FRAMES numRefL1; /**< [in]: Specifies max number of reference frames in reference picture list L1, that can be used by hardware for prediction of a frame. + Check support for numRefL1 using ::NV_ENC_CAPS_SUPPORT_MULTIPLE_REF_FRAMES caps. 
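+ A short illustrative sketch (example values, not defaults) of filling a few of the fields above:
+ \code
+ NV_ENC_CONFIG_HEVC hevc = { 0 };
+ hevc.chromaFormatIDC = 1; // YUV 4:2:0 input
+ hevc.pixelBitDepthMinus8 = 2; // 10 bit input
+ hevc.sliceMode = 3; // fixed number of slices per picture
+ hevc.sliceModeData = 4; // four slices; driver divides optimally
+ \endcode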
*/
+ uint32_t reserved1[214]; /**< [in]: Reserved and must be set to 0.*/
+ void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_CONFIG_HEVC;
+
+/**
+ * \struct _NV_ENC_CONFIG_H264_MEONLY
+ * H264 encoder configuration parameters for ME only mode
+ *
+ */
+typedef struct _NV_ENC_CONFIG_H264_MEONLY
+{
+ uint32_t disablePartition16x16 :1; /**< [in]: Disable Motion Estimation on 16x16 blocks*/
+ uint32_t disablePartition8x16 :1; /**< [in]: Disable Motion Estimation on 8x16 blocks*/
+ uint32_t disablePartition16x8 :1; /**< [in]: Disable Motion Estimation on 16x8 blocks*/
+ uint32_t disablePartition8x8 :1; /**< [in]: Disable Motion Estimation on 8x8 blocks*/
+ uint32_t disableIntraSearch :1; /**< [in]: Disable Intra search during Motion Estimation*/
+ uint32_t bStereoEnable :1; /**< [in]: Enable Stereo Mode for Motion Estimation where each view is independently executed*/
+ uint32_t reserved :26; /**< [in]: Reserved and must be set to 0 */
+ uint32_t reserved1 [255]; /**< [in]: Reserved and must be set to 0 */
+ void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_CONFIG_H264_MEONLY;
+
+
+/**
+ * \struct _NV_ENC_CONFIG_HEVC_MEONLY
+ * HEVC encoder configuration parameters for ME only mode
+ *
+ */
+typedef struct _NV_ENC_CONFIG_HEVC_MEONLY
+{
+ uint32_t reserved [256]; /**< [in]: Reserved and must be set to 0 */
+ void* reserved1[64]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_CONFIG_HEVC_MEONLY;
+
+/**
+ * \struct _NV_ENC_CODEC_CONFIG
+ * Codec-specific encoder configuration parameters to be set during initialization.
+ */
+typedef union _NV_ENC_CODEC_CONFIG
+{
+ NV_ENC_CONFIG_H264 h264Config; /**< [in]: Specifies the H.264-specific encoder configuration. */
+ NV_ENC_CONFIG_HEVC hevcConfig; /**< [in]: Specifies the HEVC-specific encoder configuration. */
+ NV_ENC_CONFIG_H264_MEONLY h264MeOnlyConfig; /**< [in]: Specifies the H.264-specific ME only encoder configuration. */
+ NV_ENC_CONFIG_HEVC_MEONLY hevcMeOnlyConfig; /**< [in]: Specifies the HEVC-specific ME only encoder configuration. */
+ uint32_t reserved[320]; /**< [in]: Reserved and must be set to 0 */
+} NV_ENC_CODEC_CONFIG;
+
+
+/**
+ * \struct _NV_ENC_CONFIG
+ * Encoder configuration parameters to be set during initialization.
+ */
+typedef struct _NV_ENC_CONFIG
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_CONFIG_VER. */
+ GUID profileGUID; /**< [in]: Specifies the codec profile GUID. If client specifies \p NV_ENC_CODEC_PROFILE_AUTOSELECT_GUID the NvEncodeAPI interface will select the appropriate codec profile. */
+ uint32_t gopLength; /**< [in]: Specifies the number of pictures in one GOP. Low-latency applications can set gopLength to NVENC_INFINITE_GOPLENGTH so that keyframes are not inserted automatically. */
+ int32_t frameIntervalP; /**< [in]: Specifies the GOP pattern as follows: \p frameIntervalP = 0: I, 1: IPP, 2: IBP, 3: IBBP. If gopLength is set to NVENC_INFINITE_GOPLENGTH \p frameIntervalP should be set to 1. */
+ uint32_t monoChromeEncoding; /**< [in]: Set this to 1 to enable monochrome encoding for this session. */
+ NV_ENC_PARAMS_FRAME_FIELD_MODE frameFieldMode; /**< [in]: Specifies the frame/field mode.
+ Check support for field encoding using ::NV_ENC_CAPS_SUPPORT_FIELD_ENCODING caps.
+ Using a frameFieldMode other than NV_ENC_PARAMS_FRAME_FIELD_MODE_FRAME for RGB input is not supported. */
+ NV_ENC_MV_PRECISION mvPrecision; /**< [in]: Specifies the desired motion vector prediction precision. 
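+ For illustration, a minimal top-level configuration for an infinite-GOP low latency session (example values only):
+ \code
+ NV_ENC_CONFIG cfg = { 0 };
+ cfg.version = NV_ENC_CONFIG_VER;
+ cfg.profileGUID = NV_ENC_CODEC_PROFILE_AUTOSELECT_GUID;
+ cfg.gopLength = NVENC_INFINITE_GOPLENGTH; // no automatic key frames
+ cfg.frameIntervalP = 1; // IPP..., required with an infinite GOP
+ \endcode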
*/
+ NV_ENC_RC_PARAMS rcParams; /**< [in]: Specifies the rate control parameters for the current encoding session. */
+ NV_ENC_CODEC_CONFIG encodeCodecConfig; /**< [in]: Specifies the codec specific config parameters through this union. */
+ uint32_t reserved [278]; /**< [in]: Reserved and must be set to 0 */
+ void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_CONFIG;
+
+/** macro for constructing the version field of ::_NV_ENC_CONFIG */
+#define NV_ENC_CONFIG_VER (NVENCAPI_STRUCT_VERSION(7) | ( 1<<31 ))
+
+/**
+ * Tuning information of NVENC encoding (TuningInfo is not applicable to H264 and HEVC MEOnly mode).
+ */
+typedef enum NV_ENC_TUNING_INFO
+{
+ NV_ENC_TUNING_INFO_UNDEFINED = 0, /**< Undefined tuningInfo. Invalid value for encoding. */
+ NV_ENC_TUNING_INFO_HIGH_QUALITY = 1, /**< Tune presets for latency tolerant encoding.*/
+ NV_ENC_TUNING_INFO_LOW_LATENCY = 2, /**< Tune presets for low latency streaming.*/
+ NV_ENC_TUNING_INFO_ULTRA_LOW_LATENCY = 3, /**< Tune presets for ultra low latency streaming.*/
+ NV_ENC_TUNING_INFO_LOSSLESS = 4, /**< Tune presets for lossless encoding.*/
+ NV_ENC_TUNING_INFO_COUNT /**< Count number of tuningInfos. Invalid value. */
+}NV_ENC_TUNING_INFO;
+
+/**
+ * \struct _NV_ENC_INITIALIZE_PARAMS
+ * Encode Session Initialization parameters.
+ */
+typedef struct _NV_ENC_INITIALIZE_PARAMS
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_INITIALIZE_PARAMS_VER. */
+ GUID encodeGUID; /**< [in]: Specifies the Encode GUID for which the encoder is being created. ::NvEncInitializeEncoder() API will fail if this is not set, or is set to an unsupported value. */
+ GUID presetGUID; /**< [in]: Specifies the preset for encoding. If the preset GUID is set, then the preset configuration will be applied before any other parameter. */
+ uint32_t encodeWidth; /**< [in]: Specifies the encode width. If not set ::NvEncInitializeEncoder() API will fail. */
+ uint32_t encodeHeight; /**< [in]: Specifies the encode height. If not set ::NvEncInitializeEncoder() API will fail. */
+ uint32_t darWidth; /**< [in]: Specifies the display aspect ratio width. */
+ uint32_t darHeight; /**< [in]: Specifies the display aspect ratio height. */
+ uint32_t frameRateNum; /**< [in]: Specifies the numerator for frame rate used for encoding in frames per second ( Frame rate = frameRateNum / frameRateDen ). */
+ uint32_t frameRateDen; /**< [in]: Specifies the denominator for frame rate used for encoding in frames per second ( Frame rate = frameRateNum / frameRateDen ). */
+ uint32_t enableEncodeAsync; /**< [in]: Set this to 1 to enable asynchronous mode; the client is expected to use events to get picture completion notification. */
+ uint32_t enablePTD; /**< [in]: Set this to 1 to enable the Picture Type Decision to be taken by the NvEncodeAPI interface. */
+ uint32_t reportSliceOffsets :1; /**< [in]: Set this to 1 to enable reporting slice offsets in ::_NV_ENC_LOCK_BITSTREAM. NV_ENC_INITIALIZE_PARAMS::enableEncodeAsync must be set to 0 to use this feature. Client must set this to 0 if NV_ENC_CONFIG_H264::sliceMode is 1 on Kepler GPUs */
+ uint32_t enableSubFrameWrite :1; /**< [in]: Set this to 1 to write out available bitstream to memory at subframe intervals.
+ If enableSubFrameWrite = 1, then the hardware encoder returns data as soon as a slice has completed encoding. 
+ This results in better encoding latency, but the downside is that the application has to keep polling via a call to nvEncLockBitstream API continuously to see if any encoded slice data is available.
+ Use this mode if you feel that the marginal reduction in latency from sub-frame encoding is worth the increase in complexity due to CPU-based polling. */
+ uint32_t enableExternalMEHints :1; /**< [in]: Set to 1 to enable external ME hints for the current frame. For NV_ENC_INITIALIZE_PARAMS::enablePTD=1 with B frames, programming L1 hints is optional for B frames since the client doesn't know the internal GOP structure.
+ NV_ENC_PIC_PARAMS::meHintRefPicDist should preferably be set with enablePTD=1. */
+ uint32_t enableMEOnlyMode :1; /**< [in]: Set to 1 to enable ME Only Mode.*/
+ uint32_t enableWeightedPrediction :1; /**< [in]: Set this to 1 to enable weighted prediction. Not supported if encode session is configured for B-Frames (i.e. NV_ENC_CONFIG::frameIntervalP > 1 or preset >=P3 when tuningInfo = ::NV_ENC_TUNING_INFO_HIGH_QUALITY or
+ tuningInfo = ::NV_ENC_TUNING_INFO_LOSSLESS. This is because preset >=P3 internally enables B frames when tuningInfo = ::NV_ENC_TUNING_INFO_HIGH_QUALITY or ::NV_ENC_TUNING_INFO_LOSSLESS). */
+ uint32_t enableOutputInVidmem :1; /**< [in]: Set this to 1 to enable output of NVENC in video memory buffer created by application. This feature is not supported for HEVC ME only mode. */
+ uint32_t reservedBitFields :26; /**< [in]: Reserved bitfields and must be set to 0 */
+ uint32_t privDataSize; /**< [in]: Reserved private data buffer size and must be set to 0 */
+ void* privData; /**< [in]: Reserved private data buffer and must be set to NULL */
+ NV_ENC_CONFIG* encodeConfig; /**< [in]: Specifies the advanced codec specific structure. If client has sent a valid codec config structure, it will override parameters set by the NV_ENC_INITIALIZE_PARAMS::presetGUID parameter. If set to NULL the NvEncodeAPI interface will use the NV_ENC_INITIALIZE_PARAMS::presetGUID to set the codec specific parameters.
+ Client can also optionally query the NvEncodeAPI interface to get codec specific parameters for a presetGUID using ::NvEncGetEncodePresetConfig() API. It can then modify (if required) some of the codec config parameters and send down a custom config structure as part of ::_NV_ENC_INITIALIZE_PARAMS.
+ Even in this case client is recommended to pass the same preset GUID it has used in ::NvEncGetEncodePresetConfig() API to query the config structure, as NV_ENC_INITIALIZE_PARAMS::presetGUID. This will not override the custom config structure but will be used to determine other Encoder HW specific parameters not exposed in the API. */
+ uint32_t maxEncodeWidth; /**< [in]: Maximum encode width to be used for current Encode session.
+ Client should allocate output buffers according to this dimension for dynamic resolution change. If set to 0, Encoder will not allow dynamic resolution change. */
+ uint32_t maxEncodeHeight; /**< [in]: Maximum encode height to be allowed for current Encode session.
+ Client should allocate output buffers according to this dimension for dynamic resolution change. If set to 0, Encoder will not allow dynamic resolution change. */
+ NVENC_EXTERNAL_ME_HINT_COUNTS_PER_BLOCKTYPE maxMEHintCountsPerBlock[2]; /**< [in]: If the client wants to pass external motion vectors in NV_ENC_PIC_PARAMS::meExternalHints buffer it must specify the maximum number of hint candidates per block per direction for the encode session. 
+ The NV_ENC_INITIALIZE_PARAMS::maxMEHintCountsPerBlock[0] is for L0 predictors and NV_ENC_INITIALIZE_PARAMS::maxMEHintCountsPerBlock[1] is for L1 predictors.
+ The client must also set NV_ENC_INITIALIZE_PARAMS::enableExternalMEHints to 1. */
+ NV_ENC_TUNING_INFO tuningInfo; /**< [in]: Tuning Info of NVENC encoding (TuningInfo is not applicable to H264 and HEVC MEOnly mode). */
+ NV_ENC_BUFFER_FORMAT bufferFormat; /**< [in]: Specifies input buffer format. Client should set input buffer format only when D3D12 interface type is used. */
+ uint32_t reserved[287]; /**< [in]: Reserved and must be set to 0 */
+ void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_INITIALIZE_PARAMS;
+
+/** macro for constructing the version field of ::_NV_ENC_INITIALIZE_PARAMS */
+#define NV_ENC_INITIALIZE_PARAMS_VER (NVENCAPI_STRUCT_VERSION(5) | ( 1<<31 ))
+
+
+/**
+ * \struct _NV_ENC_RECONFIGURE_PARAMS
+ * Encode Session Reconfigure parameters.
+ */
+typedef struct _NV_ENC_RECONFIGURE_PARAMS
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_RECONFIGURE_PARAMS_VER. */
+ NV_ENC_INITIALIZE_PARAMS reInitEncodeParams; /**< [in]: Encoder session re-initialization parameters.
+ If reInitEncodeParams.encodeConfig is NULL and
+ reInitEncodeParams.presetGUID is the same as the preset
+ GUID specified on the call to NvEncInitializeEncoder(),
+ EncodeAPI will continue to use the existing encode
+ configuration.
+ If reInitEncodeParams.encodeConfig is NULL and
+ reInitEncodeParams.presetGUID is different from the preset
+ GUID specified on the call to NvEncInitializeEncoder(),
+ EncodeAPI will try to use the default configuration for
+ the preset specified by reInitEncodeParams.presetGUID.
+ In this case, reconfiguration may fail if the new
+ configuration is incompatible with the existing
+ configuration (e.g. the new configuration results in
+ a change in the GOP structure). */
+ uint32_t resetEncoder :1; /**< [in]: This resets the rate control states and other internal encoder states. This should be used only with an IDR frame.
+ If NV_ENC_INITIALIZE_PARAMS::enablePTD is set to 1, encoder will force the frame type to IDR */
+ uint32_t forceIDR :1; /**< [in]: Encode the current picture as an IDR picture. This flag is only valid when Picture type decision is taken by the Encoder
+ [_NV_ENC_INITIALIZE_PARAMS::enablePTD == 1]. */
+ uint32_t reserved :30;
+
+}NV_ENC_RECONFIGURE_PARAMS;
+
+/** macro for constructing the version field of ::_NV_ENC_RECONFIGURE_PARAMS */
+#define NV_ENC_RECONFIGURE_PARAMS_VER (NVENCAPI_STRUCT_VERSION(1) | ( 1<<31 ))
+
+/**
+ * \struct _NV_ENC_PRESET_CONFIG
+ * Encoder preset config
+ */
+typedef struct _NV_ENC_PRESET_CONFIG
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_PRESET_CONFIG_VER. */
+ NV_ENC_CONFIG presetCfg; /**< [out]: preset config returned by the Nvidia Video Encoder interface. */
+ uint32_t reserved1[255]; /**< [in]: Reserved and must be set to 0 */
+ void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */
+}NV_ENC_PRESET_CONFIG;
+
+/** macro for constructing the version field of ::_NV_ENC_PRESET_CONFIG */
+#define NV_ENC_PRESET_CONFIG_VER (NVENCAPI_STRUCT_VERSION(4) | ( 1<<31 ))
+
+
+/**
+ * \struct _NV_ENC_PIC_PARAMS_MVC
+ * MVC-specific parameters to be sent on a per-frame basis.
+ */
+typedef struct _NV_ENC_PIC_PARAMS_MVC
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_PIC_PARAMS_MVC_VER. 
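+ As with the other structs in this API, the version field must be filled before use; an illustrative sketch with example values:
+ \code
+ NV_ENC_PIC_PARAMS_MVC mvc = { 0 };
+ mvc.version = NV_ENC_PIC_PARAMS_MVC_VER;
+ mvc.viewID = 0; // base view (example value)
+ mvc.temporalID = 0;
+ \endcode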
*/
+ uint32_t viewID; /**< [in]: Specifies the view ID associated with the current input view. */
+ uint32_t temporalID; /**< [in]: Specifies the temporal ID associated with the current input view. */
+ uint32_t priorityID; /**< [in]: Specifies the priority ID associated with the current input view. Reserved and ignored by the NvEncodeAPI interface. */
+ uint32_t reserved1[12]; /**< [in]: Reserved and must be set to 0. */
+ void* reserved2[8]; /**< [in]: Reserved and must be set to NULL. */
+}NV_ENC_PIC_PARAMS_MVC;
+
+/** macro for constructing the version field of ::_NV_ENC_PIC_PARAMS_MVC */
+#define NV_ENC_PIC_PARAMS_MVC_VER NVENCAPI_STRUCT_VERSION(1)
+
+
+/**
+ * \union _NV_ENC_PIC_PARAMS_H264_EXT
+ * H264 extension picture parameters
+ */
+typedef union _NV_ENC_PIC_PARAMS_H264_EXT
+{
+ NV_ENC_PIC_PARAMS_MVC mvcPicParams; /**< [in]: Specifies the MVC picture parameters. */
+ uint32_t reserved1[32]; /**< [in]: Reserved and must be set to 0. */
+}NV_ENC_PIC_PARAMS_H264_EXT;
+
+/**
+ * \struct _NV_ENC_SEI_PAYLOAD
+ * User SEI message
+ */
+typedef struct _NV_ENC_SEI_PAYLOAD
+{
+ uint32_t payloadSize; /**< [in] SEI payload size in bytes. SEI payload must be byte aligned, as described in Annex D */
+ uint32_t payloadType; /**< [in] SEI payload types and syntax can be found in Annex D of the H.264 Specification. */
+ uint8_t *payload; /**< [in] pointer to user data */
+} NV_ENC_SEI_PAYLOAD;
+
+#define NV_ENC_H264_SEI_PAYLOAD NV_ENC_SEI_PAYLOAD
+
+/**
+ * \struct _NV_ENC_PIC_PARAMS_H264
+ * H264 specific enc pic params. Sent on a per frame basis.
+ */
+typedef struct _NV_ENC_PIC_PARAMS_H264
+{
+ uint32_t displayPOCSyntax; /**< [in]: Specifies the display POC syntax. This is required to be set if the client is handling the picture type decision. */
+ uint32_t reserved3; /**< [in]: Reserved and must be set to 0 */
+ uint32_t refPicFlag; /**< [in]: Set to 1 for a reference picture. This is ignored if NV_ENC_INITIALIZE_PARAMS::enablePTD is set to 1. */
+ uint32_t colourPlaneId; /**< [in]: Specifies the colour plane ID associated with the current input. */
+ uint32_t forceIntraRefreshWithFrameCnt; /**< [in]: Forces an intra refresh with duration equal to intraRefreshFrameCnt.
+ When outputRecoveryPointSEI is set this value is used for recovery_frame_cnt in recovery point SEI message
+ forceIntraRefreshWithFrameCnt cannot be used if B frames are used in the GOP structure specified */
+ uint32_t constrainedFrame :1; /**< [in]: Set to 1 if client wants to encode this frame with each slice completely independent of other slices in the frame.
+ NV_ENC_INITIALIZE_PARAMS::enableConstrainedEncoding should be set to 1 */
+ uint32_t sliceModeDataUpdate :1; /**< [in]: Set to 1 if client wants to change the sliceModeData field to specify a new sliceSize parameter
+ When forceIntraRefreshWithFrameCnt is set it will have priority over sliceMode setting */
+ uint32_t ltrMarkFrame :1; /**< [in]: Set to 1 if client wants to mark this frame as LTR */
+ uint32_t ltrUseFrames :1; /**< [in]: Set to 1 if client allows encoding this frame using the LTR frames specified in ltrFrameBitmap */
+ uint32_t reservedBitFields :28; /**< [in]: Reserved bit fields and must be set to 0 */
+ uint8_t* sliceTypeData; /**< [in]: Deprecated. */
+ uint32_t sliceTypeArrayCnt; /**< [in]: Deprecated. */
+ uint32_t seiPayloadArrayCnt; /**< [in]: Specifies the number of elements allocated in seiPayloadArray array. */
+ NV_ENC_SEI_PAYLOAD* seiPayloadArray; /**< [in]: Array of SEI payloads which will be inserted for this frame. 
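+ For illustration, attaching a single user SEI message (payload type 5 is user_data_unregistered per Annex D; the data buffer and its size are caller-provided and hypothetical here):
+ \code
+ NV_ENC_PIC_PARAMS_H264 h264PicParams = { 0 };
+ NV_ENC_SEI_PAYLOAD sei = { 0 };
+ sei.payloadSize = (uint32_t)userDataSize; // caller-provided
+ sei.payloadType = 5; // user_data_unregistered
+ sei.payload = userData; // caller-provided buffer
+ h264PicParams.seiPayloadArrayCnt = 1;
+ h264PicParams.seiPayloadArray = &sei;
+ \endcode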
*/
+ uint32_t sliceMode; /**< [in]: This parameter in conjunction with sliceModeData specifies the way in which the picture is divided into slices
+ sliceMode = 0 MB based slices, sliceMode = 1 Byte based slices, sliceMode = 2 MB row based slices, sliceMode = 3, numSlices in Picture
+ When forceIntraRefreshWithFrameCnt is set it will have priority over sliceMode setting
+ When sliceMode == 0 and sliceModeData == 0 whole picture will be coded with one slice */
+ uint32_t sliceModeData; /**< [in]: Specifies the parameter needed for sliceMode. For:
+ sliceMode = 0, sliceModeData specifies # of MBs in each slice (except last slice)
+ sliceMode = 1, sliceModeData specifies maximum # of bytes in each slice (except last slice)
+ sliceMode = 2, sliceModeData specifies # of MB rows in each slice (except last slice)
+ sliceMode = 3, sliceModeData specifies number of slices in the picture. Driver will divide picture into slices optimally */
+ uint32_t ltrMarkFrameIdx; /**< [in]: Specifies the long term reference frame index to use for marking this frame as LTR.*/
+ uint32_t ltrUseFrameBitmap; /**< [in]: Specifies the associated bitmap of LTR frame indices to use when encoding this frame. */
+ uint32_t ltrUsageMode; /**< [in]: Not supported. Reserved for future use and must be set to 0. */
+ uint32_t forceIntraSliceCount; /**< [in]: Specifies the number of slices to be forced to Intra in the current picture.
+ This option along with forceIntraSliceIdx[] array needs to be used with sliceMode = 3 only */
+ uint32_t *forceIntraSliceIdx; /**< [in]: Slice indices to be forced to intra in the current picture. Each slice index should be <= num_slices_in_picture -1. Index starts from 0 for first slice.
+ The number of entries in this array should be equal to forceIntraSliceCount */
+ NV_ENC_PIC_PARAMS_H264_EXT h264ExtPicParams; /**< [in]: Specifies the H264 extension config parameters using this config. */
+ uint32_t reserved [210]; /**< [in]: Reserved and must be set to 0. */
+ void* reserved2[61]; /**< [in]: Reserved and must be set to NULL. */
+} NV_ENC_PIC_PARAMS_H264;
+
+/**
+ * \struct _NV_ENC_PIC_PARAMS_HEVC
+ * HEVC specific enc pic params. Sent on a per frame basis.
+ */
+typedef struct _NV_ENC_PIC_PARAMS_HEVC
+{
+ uint32_t displayPOCSyntax; /**< [in]: Specifies the display POC syntax. This is required to be set if the client is handling the picture type decision. */
+ uint32_t refPicFlag; /**< [in]: Set to 1 for a reference picture. This is ignored if NV_ENC_INITIALIZE_PARAMS::enablePTD is set to 1. */
+ uint32_t temporalId; /**< [in]: Specifies the temporal id of the picture */
+ uint32_t forceIntraRefreshWithFrameCnt; /**< [in]: Forces an intra refresh with duration equal to intraRefreshFrameCnt.
+ When outputRecoveryPointSEI is set this value is used for recovery_frame_cnt in recovery point SEI message
+ forceIntraRefreshWithFrameCnt cannot be used if B frames are used in the GOP structure specified */
+ uint32_t constrainedFrame :1; /**< [in]: Set to 1 if client wants to encode this frame with each slice completely independent of other slices in the frame. 
+ NV_ENC_INITIALIZE_PARAMS::enableConstrainedEncoding should be set to 1 */
+ uint32_t sliceModeDataUpdate :1; /**< [in]: Set to 1 if client wants to change the sliceModeData field to specify a new sliceSize parameter
+ When forceIntraRefreshWithFrameCnt is set it will have priority over sliceMode setting */
+ uint32_t ltrMarkFrame :1; /**< [in]: Set to 1 if client wants to mark this frame as LTR */
+ uint32_t ltrUseFrames :1; /**< [in]: Set to 1 if client allows encoding this frame using the LTR frames specified in ltrFrameBitmap */
+ uint32_t reservedBitFields :28; /**< [in]: Reserved bit fields and must be set to 0 */
+ uint8_t* sliceTypeData; /**< [in]: Array which specifies the slice type used to force intra slice for a particular slice. Currently supported only for NV_ENC_CONFIG_H264::sliceMode == 3.
+ Client should allocate an array of size sliceModeData, where sliceModeData is specified in the corresponding field of ::_NV_ENC_CONFIG_H264.
+ Array element with index n corresponds to nth slice. To force a particular slice to intra, the client should set the corresponding array element to NV_ENC_SLICE_TYPE_I;
+ all other array elements should be set to NV_ENC_SLICE_TYPE_DEFAULT */
+ uint32_t sliceTypeArrayCnt; /**< [in]: Client should set this to the number of elements allocated in sliceTypeData array. If sliceTypeData is NULL then this should be set to 0 */
+ uint32_t sliceMode; /**< [in]: This parameter in conjunction with sliceModeData specifies the way in which the picture is divided into slices
+ sliceMode = 0 CTU based slices, sliceMode = 1 Byte based slices, sliceMode = 2 CTU row based slices, sliceMode = 3, numSlices in Picture
+ When forceIntraRefreshWithFrameCnt is set it will have priority over sliceMode setting
+ When sliceMode == 0 and sliceModeData == 0 whole picture will be coded with one slice */
+ uint32_t sliceModeData; /**< [in]: Specifies the parameter needed for sliceMode. For:
+ sliceMode = 0, sliceModeData specifies # of CTUs in each slice (except last slice)
+ sliceMode = 1, sliceModeData specifies maximum # of bytes in each slice (except last slice)
+ sliceMode = 2, sliceModeData specifies # of CTU rows in each slice (except last slice)
+ sliceMode = 3, sliceModeData specifies number of slices in the picture. Driver will divide picture into slices optimally */
+ uint32_t ltrMarkFrameIdx; /**< [in]: Specifies the long term reference frame index to use for marking this frame as LTR.*/
+ uint32_t ltrUseFrameBitmap; /**< [in]: Specifies the associated bitmap of LTR frame indices to use when encoding this frame. */
+ uint32_t ltrUsageMode; /**< [in]: Not supported. Reserved for future use and must be set to 0. */
+ uint32_t seiPayloadArrayCnt; /**< [in]: Specifies the number of elements allocated in seiPayloadArray array. */
+ uint32_t reserved; /**< [in]: Reserved and must be set to 0. */
+ NV_ENC_SEI_PAYLOAD* seiPayloadArray; /**< [in]: Array of SEI payloads which will be inserted for this frame. */
+ uint32_t reserved2 [244]; /**< [in]: Reserved and must be set to 0. */
+ void* reserved3[61]; /**< [in]: Reserved and must be set to NULL. */
+} NV_ENC_PIC_PARAMS_HEVC;
+
+/**
+ * Codec specific per-picture encoding parameters.
+ */
+typedef union _NV_ENC_CODEC_PIC_PARAMS
+{
+ NV_ENC_PIC_PARAMS_H264 h264PicParams; /**< [in]: H264 encode picture params. */
+ NV_ENC_PIC_PARAMS_HEVC hevcPicParams; /**< [in]: HEVC encode picture params. */
+ uint32_t reserved[256]; /**< [in]: Reserved and must be set to 0. 
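+ The member matching the codec of the encode session is the one that is read; a sketch (assuming an HEVC session for illustration):
+ \code
+ NV_ENC_CODEC_PIC_PARAMS codecParams = { 0 };
+ codecParams.hevcPicParams.temporalId = 0; // example value
+ // for an H264 session, fill codecParams.h264PicParams instead
+ \endcode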
*/
+} NV_ENC_CODEC_PIC_PARAMS;
+
+/**
+ * \struct _NV_ENC_PIC_PARAMS
+ * Encoding parameters that need to be sent on a per frame basis.
+ */
+typedef struct _NV_ENC_PIC_PARAMS
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_PIC_PARAMS_VER. */
+ uint32_t inputWidth; /**< [in]: Specifies the input frame width */
+ uint32_t inputHeight; /**< [in]: Specifies the input frame height */
+ uint32_t inputPitch; /**< [in]: Specifies the input buffer pitch. If pitch value is not known, set this to inputWidth. */
+ uint32_t encodePicFlags; /**< [in]: Specifies bit-wise OR of encode picture flags. See ::NV_ENC_PIC_FLAGS enum. */
+ uint32_t frameIdx; /**< [in]: Specifies the frame index associated with the input frame [optional]. */
+ uint64_t inputTimeStamp; /**< [in]: Specifies opaque data which is associated with the encoded frame, but not actually encoded in the output bitstream.
+ This opaque data can be used later to uniquely refer to the corresponding encoded frame. For example, it can be used
+ for identifying the frame to be invalidated in the reference picture buffer, if lost at the client. */
+ uint64_t inputDuration; /**< [in]: Specifies duration of the input picture */
+ NV_ENC_INPUT_PTR inputBuffer; /**< [in]: Specifies the input buffer pointer. Client must use a pointer obtained from ::NvEncCreateInputBuffer() or ::NvEncMapInputResource() APIs.*/
+ NV_ENC_OUTPUT_PTR outputBitstream; /**< [in]: Specifies the output buffer pointer.
+ If NV_ENC_INITIALIZE_PARAMS::enableOutputInVidmem is set to 0, specifies the pointer to output buffer. Client should use a pointer obtained from ::NvEncCreateBitstreamBuffer() API.
+ If NV_ENC_INITIALIZE_PARAMS::enableOutputInVidmem is set to 1, client should allocate buffer in video memory for NV_ENC_ENCODE_OUT_PARAMS struct and encoded bitstream data. Client
+ should use a pointer obtained from ::NvEncMapInputResource() API, when mapping this output buffer and assign it to NV_ENC_PIC_PARAMS::outputBitstream.
+ First 256 bytes of this buffer should be interpreted as NV_ENC_ENCODE_OUT_PARAMS struct followed by encoded bitstream data. Recommended size for output buffer is sum of size of
+ NV_ENC_ENCODE_OUT_PARAMS struct and twice the input frame size for lower resolutions, e.g. CIF, and 1.5 times the input frame size for higher resolutions. If encoded bitstream size is
+ greater than the allocated buffer size for encoded bitstream, then the output buffer will have encoded bitstream data equal to buffer size. All CUDA operations on this buffer must use
+ the default stream. */
+ void* completionEvent; /**< [in]: Specifies an event to be signaled on completion of encoding of this Frame [only if operating in Asynchronous mode]. Each output buffer should be associated with a distinct event pointer. */
+ NV_ENC_BUFFER_FORMAT bufferFmt; /**< [in]: Specifies the input buffer format. */
+ NV_ENC_PIC_STRUCT pictureStruct; /**< [in]: Specifies structure of the input picture. */
+ NV_ENC_PIC_TYPE pictureType; /**< [in]: Specifies input picture type. Required to be set explicitly by the client if the client has not set NV_ENC_INITIALIZE_PARAMS::enablePTD to 1 while calling NvEncInitializeEncoder. */
+ NV_ENC_CODEC_PIC_PARAMS codecPicParams; /**< [in]: Specifies the codec specific per-picture encoding parameters. */
+ NVENC_EXTERNAL_ME_HINT_COUNTS_PER_BLOCKTYPE meHintCountsPerBlock[2]; /**< [in]: For H264 and HEVC, specifies the number of hint candidates per block per direction for the current frame. 
meHintCountsPerBlock[0] is for L0 predictors and meHintCountsPerBlock[1] is for L1 predictors.
+ The candidate count in NV_ENC_PIC_PARAMS::meHintCountsPerBlock[lx] must never exceed NV_ENC_INITIALIZE_PARAMS::maxMEHintCountsPerBlock[lx] provided during encoder initialization. */
+ NVENC_EXTERNAL_ME_HINT* meExternalHints; /**< [in]: For H264 and HEVC, specifies the pointer to ME external hints for the current frame. The size of ME hint buffer should be equal to number of macroblocks * the total number of candidates per macroblock.
+ The total number of candidates per MB per direction = 1*meHintCountsPerBlock[Lx].numCandsPerBlk16x16 + 2*meHintCountsPerBlock[Lx].numCandsPerBlk16x8 + 2*meHintCountsPerBlock[Lx].numCandsPerBlk8x16
+ + 4*meHintCountsPerBlock[Lx].numCandsPerBlk8x8. For frames using bidirectional ME, the total number of candidates for a single macroblock is the sum of the total number of candidates per MB for each direction (L0 and L1) */
+ uint32_t reserved1[6]; /**< [in]: Reserved and must be set to 0 */
+ void* reserved2[2]; /**< [in]: Reserved and must be set to NULL */
+ int8_t *qpDeltaMap; /**< [in]: Specifies the pointer to signed byte array containing value per MB for H264 and per CTB for HEVC in raster scan order for the current picture, which will be interpreted depending on NV_ENC_RC_PARAMS::qpMapMode.
+ If NV_ENC_RC_PARAMS::qpMapMode is NV_ENC_QP_MAP_DELTA, qpDeltaMap specifies QP modifier per MB for H264 and per CTB for HEVC. This QP modifier will be applied on top of the QP chosen by rate control.
+ If NV_ENC_RC_PARAMS::qpMapMode is NV_ENC_QP_MAP_EMPHASIS, qpDeltaMap specifies Emphasis Level Map per MB for H264. This level value along with QP chosen by rate control is used to
+ compute the QP modifier, which in turn is applied on top of QP chosen by rate control.
+ If NV_ENC_RC_PARAMS::qpMapMode is NV_ENC_QP_MAP_DISABLED, value in qpDeltaMap will be ignored.*/
+ uint32_t qpDeltaMapSize; /**< [in]: Specifies the size in bytes of qpDeltaMap surface allocated by client and pointed to by NV_ENC_PIC_PARAMS::qpDeltaMap. Surface (array) should be picWidthInMbs * picHeightInMbs for H264 and picWidthInCtbs * picHeightInCtbs for HEVC */
+ uint32_t reservedBitFields; /**< [in]: Reserved bitfields and must be set to 0 */
+ uint16_t meHintRefPicDist[2]; /**< [in]: Specifies temporal distance for reference picture (NVENC_EXTERNAL_ME_HINT::refidx = 0) used during external ME with NV_ENC_INITIALIZE_PARAMS::enablePTD = 1. meHintRefPicDist[0] is for L0 hints and meHintRefPicDist[1] is for L1 hints.
+ If not set, will internally infer distance of 1. Ignored for NV_ENC_INITIALIZE_PARAMS::enablePTD = 0 */
+ NV_ENC_INPUT_PTR alphaBuffer; /**< [in]: Specifies the input alpha buffer pointer. Client must use a pointer obtained from ::NvEncCreateInputBuffer() or ::NvEncMapInputResource() APIs.
+ Applicable only when encoding HEVC with alpha layer is enabled. */
+ uint32_t reserved3[286]; /**< [in]: Reserved and must be set to 0 */
+ void* reserved4[59]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_PIC_PARAMS;
+
+/** Macro for constructing the version field of ::_NV_ENC_PIC_PARAMS */
+#define NV_ENC_PIC_PARAMS_VER (NVENCAPI_STRUCT_VERSION(4) | ( 1<<31 ))
+
+
+/**
+ * \struct _NV_ENC_MEONLY_PARAMS
+ * MEOnly parameters that need to be sent on a per motion estimation basis.
+ * NV_ENC_MEONLY_PARAMS::meExternalHints is supported for H264 only.
+ */
+typedef struct _NV_ENC_MEONLY_PARAMS
+{
+ uint32_t version; /**< [in]: Struct version. 
Must be set to NV_ENC_MEONLY_PARAMS_VER.*/
+ uint32_t inputWidth; /**< [in]: Specifies the input frame width */
+ uint32_t inputHeight; /**< [in]: Specifies the input frame height */
+ NV_ENC_INPUT_PTR inputBuffer; /**< [in]: Specifies the input buffer pointer. Client must use a pointer obtained from NvEncCreateInputBuffer() or NvEncMapInputResource() APIs. */
+ NV_ENC_INPUT_PTR referenceFrame; /**< [in]: Specifies the reference frame pointer */
+ NV_ENC_OUTPUT_PTR mvBuffer; /**< [in]: Specifies the output buffer pointer.
+ If NV_ENC_INITIALIZE_PARAMS::enableOutputInVidmem is set to 0, specifies the pointer to motion vector data buffer allocated by NvEncCreateMVBuffer.
+ Client must lock mvBuffer using ::NvEncLockBitstream() API to get the motion vector data.
+ If NV_ENC_INITIALIZE_PARAMS::enableOutputInVidmem is set to 1, client should allocate buffer in video memory for storing the motion vector data. The size of this buffer must
+ be equal to total number of macroblocks multiplied by size of NV_ENC_H264_MV_DATA struct. Client should use a pointer obtained from ::NvEncMapInputResource() API, when mapping this
+ output buffer and assign it to NV_ENC_MEONLY_PARAMS::mvBuffer. All CUDA operations on this buffer must use the default stream. */
+ NV_ENC_BUFFER_FORMAT bufferFmt; /**< [in]: Specifies the input buffer format. */
+ void* completionEvent; /**< [in]: Specifies an event to be signaled on completion of motion estimation
+ of this Frame [only if operating in Asynchronous mode].
+ Each output buffer should be associated with a distinct event pointer. */
+ uint32_t viewID; /**< [in]: Specifies left or right viewID if NV_ENC_CONFIG_H264_MEONLY::bStereoEnable is set.
+ viewID can be 0,1 if bStereoEnable is set, 0 otherwise. */
+ NVENC_EXTERNAL_ME_HINT_COUNTS_PER_BLOCKTYPE
+ meHintCountsPerBlock[2]; /**< [in]: Specifies the number of hint candidates per block for the current frame. meHintCountsPerBlock[0] is for L0 predictors.
+ The candidate count in NV_ENC_PIC_PARAMS::meHintCountsPerBlock[lx] must never exceed NV_ENC_INITIALIZE_PARAMS::maxMEHintCountsPerBlock[lx] provided during encoder initialization. */
+ NVENC_EXTERNAL_ME_HINT *meExternalHints; /**< [in]: Specifies the pointer to ME external hints for the current frame. The size of ME hint buffer should be equal to number of macroblocks * the total number of candidates per macroblock.
+ The total number of candidates per MB per direction = 1*meHintCountsPerBlock[Lx].numCandsPerBlk16x16 + 2*meHintCountsPerBlock[Lx].numCandsPerBlk16x8 + 2*meHintCountsPerBlock[Lx].numCandsPerBlk8x16
+ + 4*meHintCountsPerBlock[Lx].numCandsPerBlk8x8. For frames using bidirectional ME, the total number of candidates for a single macroblock is the sum of the total number of candidates per MB for each direction (L0 and L1) */
+ uint32_t reserved1[243]; /**< [in]: Reserved and must be set to 0 */
+ void* reserved2[59]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_MEONLY_PARAMS;
+
+/** NV_ENC_MEONLY_PARAMS struct version*/
+#define NV_ENC_MEONLY_PARAMS_VER NVENCAPI_STRUCT_VERSION(3)
+
+
+/**
+ * \struct _NV_ENC_LOCK_BITSTREAM
+ * Bitstream buffer lock parameters.
+ */
+typedef struct _NV_ENC_LOCK_BITSTREAM
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_LOCK_BITSTREAM_VER. */
+ uint32_t doNotWait :1; /**< [in]: If this flag is set, the NvEncodeAPI interface will return buffer pointer even if operation is not completed. If not set, the call will block until operation completes. 
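+ For illustration, a non-blocking poll for encoded output; the encoder handle, the NV_ENCODE_API_FUNCTION_LIST instance (nvenc) and the bitstream buffer are assumed to have been obtained earlier:
+ \code
+ NV_ENC_LOCK_BITSTREAM lock = { 0 };
+ lock.version = NV_ENC_LOCK_BITSTREAM_VER;
+ lock.outputBitstream = outputBuffer; // from NvEncCreateBitstreamBuffer()
+ lock.doNotWait = 1; // poll instead of blocking
+ if (nvenc.nvEncLockBitstream(encoder, &lock) == NV_ENC_SUCCESS)
+ {
+ // consume lock.bitstreamSizeInBytes bytes at lock.bitstreamBufferPtr
+ nvenc.nvEncUnlockBitstream(encoder, lock.outputBitstream);
+ }
+ \endcode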
*/
+ uint32_t ltrFrame :1; /**< [out]: Flag indicating this frame is marked as LTR frame */
+ uint32_t getRCStats :1; /**< [in]: If this flag is set, the lockBitstream call will also return intra/inter MB counts and average MVX, MVY values */
+ uint32_t reservedBitFields :29; /**< [in]: Reserved bit fields and must be set to 0 */
+ void* outputBitstream; /**< [in]: Pointer to the bitstream buffer being locked. */
+ uint32_t* sliceOffsets; /**< [in, out]: Array which receives the slice offsets. This is not supported if NV_ENC_CONFIG_H264::sliceMode is 1 on Kepler GPUs. Array size must be equal to size of frame in MBs. */
+ uint32_t frameIdx; /**< [out]: Frame no. for which the bitstream is being retrieved. */
+ uint32_t hwEncodeStatus; /**< [out]: The NvEncodeAPI interface status for the locked picture. */
+ uint32_t numSlices; /**< [out]: Number of slices in the encoded picture. Will be reported only if NV_ENC_INITIALIZE_PARAMS::reportSliceOffsets is set to 1. */
+ uint32_t bitstreamSizeInBytes; /**< [out]: Actual number of bytes generated and copied to the memory pointed by bitstreamBufferPtr.
+ When HEVC alpha layer encoding is enabled, this field reports the total encoded size in bytes, i.e. it is the encoded size of the base plus the alpha layer. */
+ uint64_t outputTimeStamp; /**< [out]: Presentation timestamp associated with the encoded output. */
+ uint64_t outputDuration; /**< [out]: Presentation duration associated with the encoded output. */
+ void* bitstreamBufferPtr; /**< [out]: Pointer to the generated output bitstream.
+ For MEOnly mode _NV_ENC_LOCK_BITSTREAM::bitstreamBufferPtr should be typecast to
+ NV_ENC_H264_MV_DATA/NV_ENC_HEVC_MV_DATA pointer respectively for H264/HEVC */
+ NV_ENC_PIC_TYPE pictureType; /**< [out]: Picture type of the encoded picture. */
+ NV_ENC_PIC_STRUCT pictureStruct; /**< [out]: Structure of the generated output picture. */
+ uint32_t frameAvgQP; /**< [out]: Average QP of the frame. */
+ uint32_t frameSatd; /**< [out]: Total SATD cost for whole frame. */
+ uint32_t ltrFrameIdx; /**< [out]: Frame index associated with this LTR frame. */
+ uint32_t ltrFrameBitmap; /**< [out]: Bitmap of LTR frame indices which were used for encoding this frame. Value of 0 if no LTR frames were used. */
+ uint32_t temporalId; /**< [out]: TemporalId value of the frame when using temporalSVC encoding */
+ uint32_t reserved[12]; /**< [in]: Reserved and must be set to 0 */
+ uint32_t intraMBCount; /**< [out]: For H264, Number of Intra MBs in the encoded frame. For HEVC, Number of Intra CTBs in the encoded frame. Supported only if _NV_ENC_LOCK_BITSTREAM::getRCStats set to 1. */
+ uint32_t interMBCount; /**< [out]: For H264, Number of Inter MBs in the encoded frame, includes skip MBs. For HEVC, Number of Inter CTBs in the encoded frame. Supported only if _NV_ENC_LOCK_BITSTREAM::getRCStats set to 1. */
+ int32_t averageMVX; /**< [out]: Average Motion Vector in X direction for the encoded frame. Supported only if _NV_ENC_LOCK_BITSTREAM::getRCStats set to 1. */
+ int32_t averageMVY; /**< [out]: Average Motion Vector in Y direction for the encoded frame. Supported only if _NV_ENC_LOCK_BITSTREAM::getRCStats set to 1. */
+ uint32_t alphaLayerSizeInBytes; /**< [out]: Number of bytes generated for the alpha layer in the encoded output. Applicable only when HEVC with alpha encoding is enabled. 
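+ For example (illustrative), given a successful lock, the size of the base layer alone is:
+ \code
+ uint32_t baseLayerSize = lock.bitstreamSizeInBytes - lock.alphaLayerSizeInBytes;
+ \endcode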
*/ + + uint32_t reserved1[218]; /**< [in]: Reserved and must be set to 0 */ + void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */ +} NV_ENC_LOCK_BITSTREAM; + +/** Macro for constructing the version field of ::_NV_ENC_LOCK_BITSTREAM */ +#define NV_ENC_LOCK_BITSTREAM_VER NVENCAPI_STRUCT_VERSION(1) + + +/** + * \struct _NV_ENC_LOCK_INPUT_BUFFER + * Uncompressed Input Buffer lock parameters. + */ +typedef struct _NV_ENC_LOCK_INPUT_BUFFER +{ + uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_LOCK_INPUT_BUFFER_VER. */ + uint32_t doNotWait :1; /**< [in]: Set to 1 to make ::NvEncLockInputBuffer() a unblocking call. If the encoding is not completed, driver will return ::NV_ENC_ERR_ENCODER_BUSY error code. */ + uint32_t reservedBitFields :31; /**< [in]: Reserved bitfields and must be set to 0 */ + NV_ENC_INPUT_PTR inputBuffer; /**< [in]: Pointer to the input buffer to be locked, client should pass the pointer obtained from ::NvEncCreateInputBuffer() or ::NvEncMapInputResource API. */ + void* bufferDataPtr; /**< [out]: Pointed to the locked input buffer data. Client can only access input buffer using the \p bufferDataPtr. */ + uint32_t pitch; /**< [out]: Pitch of the locked input buffer. */ + uint32_t reserved1[251]; /**< [in]: Reserved and must be set to 0 */ + void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */ +} NV_ENC_LOCK_INPUT_BUFFER; + +/** Macro for constructing the version field of ::_NV_ENC_LOCK_INPUT_BUFFER */ +#define NV_ENC_LOCK_INPUT_BUFFER_VER NVENCAPI_STRUCT_VERSION(1) + + +/** + * \struct _NV_ENC_MAP_INPUT_RESOURCE + * Map an input resource to a Nvidia Encoder Input Buffer + */ +typedef struct _NV_ENC_MAP_INPUT_RESOURCE +{ + uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_MAP_INPUT_RESOURCE_VER. */ + uint32_t subResourceIndex; /**< [in]: Deprecated. Do not use. */ + void* inputResource; /**< [in]: Deprecated. Do not use. */ + NV_ENC_REGISTERED_PTR registeredResource; /**< [in]: The Registered resource handle obtained by calling NvEncRegisterInputResource. */ + NV_ENC_INPUT_PTR mappedResource; /**< [out]: Mapped pointer corresponding to the registeredResource. This pointer must be used in NV_ENC_PIC_PARAMS::inputBuffer parameter in ::NvEncEncodePicture() API. */ + NV_ENC_BUFFER_FORMAT mappedBufferFmt; /**< [out]: Buffer format of the outputResource. This buffer format must be used in NV_ENC_PIC_PARAMS::bufferFmt if client using the above mapped resource pointer. */ + uint32_t reserved1[251]; /**< [in]: Reserved and must be set to 0. */ + void* reserved2[63]; /**< [in]: Reserved and must be set to NULL */ +} NV_ENC_MAP_INPUT_RESOURCE; + +/** Macro for constructing the version field of ::_NV_ENC_MAP_INPUT_RESOURCE */ +#define NV_ENC_MAP_INPUT_RESOURCE_VER NVENCAPI_STRUCT_VERSION(4) + +/** + * \struct _NV_ENC_INPUT_RESOURCE_OPENGL_TEX + * NV_ENC_REGISTER_RESOURCE::resourceToRegister must be a pointer to a variable of this type, + * when NV_ENC_REGISTER_RESOURCE::resourceType is NV_ENC_INPUT_RESOURCE_TYPE_OPENGL_TEX + */ +typedef struct _NV_ENC_INPUT_RESOURCE_OPENGL_TEX +{ + uint32_t texture; /**< [in]: The name of the texture to be used. */ + uint32_t target; /**< [in]: Accepted values are GL_TEXTURE_RECTANGLE and GL_TEXTURE_2D. */ +} NV_ENC_INPUT_RESOURCE_OPENGL_TEX; + +/** \struct NV_ENC_FENCE_POINT_D3D12 + * Fence and fence value for synchronization. + */ +typedef struct _NV_ENC_FENCE_POINT_D3D12 +{ + void* pFence; /**< [in]: Pointer to ID3D12Fence. This fence object is used for synchronization. 
*/ + uint64_t value; /**< [in]: Fence value to reach or exceed before the GPU operation or + fence value to set the fence to, after the GPU operation. */ +} NV_ENC_FENCE_POINT_D3D12; + +/** + * \struct _NV_ENC_INPUT_RESOURCE_D3D12 + * NV_ENC_PIC_PARAMS::inputBuffer and NV_ENC_PIC_PARAMS::alphaBuffer must be a pointer to a struct of this type, + * when D3D12 interface is used + */ +typedef struct _NV_ENC_INPUT_RESOURCE_D3D12 +{ + NV_ENC_REGISTERED_PTR pInputBuffer; /**< [in]: Specifies the input surface pointer. Client must use a pointer obtained from NvEncRegisterResource() in NV_ENC_REGISTER_RESOURCE::registeredResource + when registering input surface. */ + NV_ENC_FENCE_POINT_D3D12 inputFencePoint; /**< [in]: Specifies the input fence and corresponding fence value to do GPU wait. + This fence will be used to do GPU wait until the specified fence reaches or exceeds the specified value. */ + uint32_t reserved1[16]; /**< [in]: Reserved and must be set to 0. */ + void* reserved2[16]; /**< [in]: Reserved and must be set to NULL. */ +} NV_ENC_INPUT_RESOURCE_D3D12; + +/** + * \struct _NV_ENC_OUTPUT_RESOURCE_D3D12 + * NV_ENC_PIC_PARAMS::outputBitstream and NV_ENC_LOCK_BITSTREAM::outputBitstream must be a pointer to a struct of this type, + * when D3D12 interface is used + */ +typedef struct _NV_ENC_OUTPUT_RESOURCE_D3D12 +{ + NV_ENC_REGISTERED_PTR pOutputBuffer; /**< [in]: Specifies the output buffer pointer. Client must use a pointer obtained from NvEncRegisterResource() in NV_ENC_REGISTER_RESOURCE::registeredResource + when registering output bitstream buffer */ + NV_ENC_FENCE_POINT_D3D12 outputFencePoint; /**< [in]: Specifies the output fence and corresponding fence value to set after GPU operation is finished.*/ + uint32_t reserved1[16]; /**< [in]: Reserved and must be set to 0. */ + void* reserved2[16]; /**< [in]: Reserved and must be set to NULL. */ +} NV_ENC_OUTPUT_RESOURCE_D3D12; + +/** + * \struct _NV_ENC_REGISTER_RESOURCE + * Register a resource for future use with the Nvidia Video Encoder Interface. + */ +typedef struct _NV_ENC_REGISTER_RESOURCE +{ + uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_REGISTER_RESOURCE_VER. */ + NV_ENC_INPUT_RESOURCE_TYPE resourceType; /**< [in]: Specifies the type of resource to be registered. + Supported values are + ::NV_ENC_INPUT_RESOURCE_TYPE_DIRECTX, + ::NV_ENC_INPUT_RESOURCE_TYPE_CUDADEVICEPTR, + ::NV_ENC_INPUT_RESOURCE_TYPE_OPENGL_TEX */ + uint32_t width; /**< [in]: Input frame width. */ + uint32_t height; /**< [in]: Input frame height. */ + uint32_t pitch; /**< [in]: Input buffer pitch. + For ::NV_ENC_INPUT_RESOURCE_TYPE_DIRECTX resources, set this to 0. + For ::NV_ENC_INPUT_RESOURCE_TYPE_CUDADEVICEPTR resources, set this to + the pitch as obtained from cuMemAllocPitch(), or to the width in + bytes (if this resource was created by using cuMemAlloc()). This + value must be a multiple of 4. + For ::NV_ENC_INPUT_RESOURCE_TYPE_CUDAARRAY resources, set this to the + width of the allocation in bytes (i.e. + CUDA_ARRAY3D_DESCRIPTOR::Width * CUDA_ARRAY3D_DESCRIPTOR::NumChannels). + For ::NV_ENC_INPUT_RESOURCE_TYPE_OPENGL_TEX resources, set this to the + texture width multiplied by the number of components in the texture + format. */ + uint32_t subResourceIndex; /**< [in]: Subresource Index of the DirectX resource to be registered. Should be set to 0 for other interfaces. */ + void* resourceToRegister; /**< [in]: Handle to the resource that is being registered. 
*/
+    NV_ENC_REGISTERED_PTR registeredResource; /**< [out]: Registered resource handle. This should be used in future interactions with the Nvidia Video Encoder Interface. */
+    NV_ENC_BUFFER_FORMAT bufferFormat;        /**< [in]: Buffer format of resource to be registered. */
+    NV_ENC_BUFFER_USAGE bufferUsage;          /**< [in]: Usage of resource to be registered. */
+    NV_ENC_FENCE_POINT_D3D12* pInputFencePoint;  /**< [in]: Specifies the pointer to input fence and corresponding fence value to do GPU wait.
+                                                      To be used only when NV_ENC_REGISTER_RESOURCE::resourceToRegister represents D3D12 surface and
+                                                      NV_ENC_BUFFER_USAGE::bufferUsage is NV_ENC_INPUT_IMAGE.
+                                                      This fence will be used to do GPU wait until the specified fence reaches or exceeds the specified value. */
+    NV_ENC_FENCE_POINT_D3D12* pOutputFencePoint; /**< [in]: Specifies the pointer to output fence and corresponding fence value to set after GPU operation is finished.
+                                                      To be used only when NV_ENC_REGISTER_RESOURCE::resourceToRegister represents D3D12 surface and
+                                                      NV_ENC_BUFFER_USAGE::bufferUsage is NV_ENC_INPUT_IMAGE. */
+    uint32_t reserved1[247];                  /**< [in]: Reserved and must be set to 0. */
+    void* reserved2[60];                      /**< [in]: Reserved and must be set to NULL. */
+} NV_ENC_REGISTER_RESOURCE;
+
+/** Macro for constructing the version field of ::_NV_ENC_REGISTER_RESOURCE */
+#define NV_ENC_REGISTER_RESOURCE_VER NVENCAPI_STRUCT_VERSION(3)
+
+/**
+ * \struct _NV_ENC_STAT
+ * Encode Stats structure.
+ */
+typedef struct _NV_ENC_STAT
+{
+    uint32_t version;                /**< [in]: Struct version. Must be set to ::NV_ENC_STAT_VER. */
+    uint32_t reserved;               /**< [in]: Reserved and must be set to 0 */
+    NV_ENC_OUTPUT_PTR outputBitStream; /**< [out]: Specifies the pointer to output bitstream. */
+    uint32_t bitStreamSize;          /**< [out]: Size of generated bitstream in bytes. */
+    uint32_t picType;                /**< [out]: Picture type of encoded picture. See ::NV_ENC_PIC_TYPE. */
+    uint32_t lastValidByteOffset;    /**< [out]: Offset of last valid bytes of completed bitstream */
+    uint32_t sliceOffsets[16];       /**< [out]: Offsets of each slice */
+    uint32_t picIdx;                 /**< [out]: Picture number */
+    uint32_t frameAvgQP;             /**< [out]: Average QP of the frame. */
+    uint32_t ltrFrame :1;            /**< [out]: Flag indicating this frame is marked as LTR frame */
+    uint32_t reservedBitFields :31;  /**< [in]: Reserved bit fields and must be set to 0 */
+    uint32_t ltrFrameIdx;            /**< [out]: Frame index associated with this LTR frame. */
+    uint32_t intraMBCount;           /**< [out]: For H264, Number of Intra MBs in the encoded frame. For HEVC, Number of Intra CTBs in the encoded frame. */
+    uint32_t interMBCount;           /**< [out]: For H264, Number of Inter MBs in the encoded frame, includes skip MBs. For HEVC, Number of Inter CTBs in the encoded frame. */
+    int32_t averageMVX;              /**< [out]: Average Motion Vector in X direction for the encoded frame. */
+    int32_t averageMVY;              /**< [out]: Average Motion Vector in Y direction for the encoded frame. */
+    uint32_t reserved1[226];         /**< [in]: Reserved and must be set to 0 */
+    void* reserved2[64];             /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_STAT;
+
+/** Macro for constructing the version field of ::_NV_ENC_STAT */
+#define NV_ENC_STAT_VER NVENCAPI_STRUCT_VERSION(1)
+
+
+/**
+ * \struct _NV_ENC_SEQUENCE_PARAM_PAYLOAD
+ * Sequence and picture parameters payload.
+ */
+typedef struct _NV_ENC_SEQUENCE_PARAM_PAYLOAD
+{
+    uint32_t version;                /**< [in]: Struct version. Must be set to ::NV_ENC_SEQUENCE_PARAM_PAYLOAD_VER. */
+    uint32_t inBufferSize;           /**< [in]: Specifies the size of the spsppsBuffer provided by the client */
+    uint32_t spsId;                  /**< [in]: Specifies the SPS id to be used in sequence header. Default value is 0. */
+    uint32_t ppsId;                  /**< [in]: Specifies the PPS id to be used in picture header. Default value is 0. */
+    void* spsppsBuffer;              /**< [in]: Specifies bitstream header pointer of size NV_ENC_SEQUENCE_PARAM_PAYLOAD::inBufferSize.
+                                          It is the client's responsibility to manage this memory. */
+    uint32_t* outSPSPPSPayloadSize;  /**< [out]: Size of the sequence and picture header in bytes. */
+    uint32_t reserved[250];          /**< [in]: Reserved and must be set to 0 */
+    void* reserved2[64];             /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_SEQUENCE_PARAM_PAYLOAD;
+
+/** Macro for constructing the version field of ::_NV_ENC_SEQUENCE_PARAM_PAYLOAD */
+#define NV_ENC_SEQUENCE_PARAM_PAYLOAD_VER NVENCAPI_STRUCT_VERSION(1)
+
+
+/**
+ * Event registration/unregistration parameters.
+ */
+typedef struct _NV_ENC_EVENT_PARAMS
+{
+    uint32_t version;                /**< [in]: Struct version. Must be set to ::NV_ENC_EVENT_PARAMS_VER. */
+    uint32_t reserved;               /**< [in]: Reserved and must be set to 0 */
+    void* completionEvent;           /**< [in]: Handle to event to be registered/unregistered with the NvEncodeAPI interface. */
+    uint32_t reserved1[253];         /**< [in]: Reserved and must be set to 0 */
+    void* reserved2[64];             /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_EVENT_PARAMS;
+
+/** Macro for constructing the version field of ::_NV_ENC_EVENT_PARAMS */
+#define NV_ENC_EVENT_PARAMS_VER NVENCAPI_STRUCT_VERSION(1)
+
+/**
+ * Encoder Session Creation parameters
+ */
+typedef struct _NV_ENC_OPEN_ENCODE_SESSIONEX_PARAMS
+{
+    uint32_t version;                /**< [in]: Struct version. Must be set to ::NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER. */
+    NV_ENC_DEVICE_TYPE deviceType;   /**< [in]: Specifies the device type. */
+    void* device;                    /**< [in]: Pointer to client device. */
+    void* reserved;                  /**< [in]: Reserved and must be set to 0. */
+    uint32_t apiVersion;             /**< [in]: API version. Should be set to NVENCAPI_VERSION. */
+    uint32_t reserved1[253];         /**< [in]: Reserved and must be set to 0 */
+    void* reserved2[64];             /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS;
+/** Macro for constructing the version field of ::_NV_ENC_OPEN_ENCODE_SESSIONEX_PARAMS */
+#define NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER NVENCAPI_STRUCT_VERSION(1)
+
+/** @} */ /* END ENCODER_STRUCTURE */
+
+
+/**
+ * \addtogroup ENCODE_FUNC NvEncodeAPI Functions
+ * @{
+ */
+
+// NvEncOpenEncodeSession
+/**
+ * \brief Opens an encoding session.
+ *
+ * Deprecated.
+ *
+ * \return
+ * ::NV_ENC_ERR_INVALID_CALL\n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncOpenEncodeSession (void* device, uint32_t deviceType, void** encoder);
+
+// NvEncGetEncodeGUIDCount
+/**
+ * \brief Retrieves the number of supported encode GUIDs.
+ *
+ * The function returns the number of codec GUIDs supported by the NvEncodeAPI
+ * interface.
+ *
+ * \param [in] encoder
+ *   Pointer to the NvEncodeAPI interface.
+ * \param [out] encodeGUIDCount
+ *   Number of supported encode GUIDs.
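+ *
+ * A minimal enumeration sketch (illustrative only, not part of the SDK; it
+ * assumes \p encoder is a valid session handle, that these entry points are
+ * reached through the function list returned by ::NvEncodeAPICreateInstance(),
+ * and it omits includes and most error handling):
+ *\code
+    uint32_t guidCount = 0;
+    if (NvEncGetEncodeGUIDCount(encoder, &guidCount) == NV_ENC_SUCCESS && guidCount > 0)
+    {
+        // Allocate room for all supported codec GUIDs, then fetch them.
+        GUID *guids = (GUID *)malloc(guidCount * sizeof(GUID));
+        uint32_t returned = 0;
+        NvEncGetEncodeGUIDs(encoder, guids, guidCount, &returned);
+        // ... inspect the 'returned' GUIDs to pick a codec ...
+        free(guids);
+    }
+ * \endcode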
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncGetEncodeGUIDCount (void* encoder, uint32_t* encodeGUIDCount);
+
+
+// NvEncGetEncodeGUIDs
+/**
+ * \brief Retrieves an array of supported encoder codec GUIDs.
+ *
+ * The function returns an array of codec GUIDs supported by the NvEncodeAPI interface.
+ * The client must allocate an array where the NvEncodeAPI interface can
+ * fill the supported GUIDs and pass the pointer in \p *GUIDs parameter.
+ * The size of the array can be determined by using ::NvEncGetEncodeGUIDCount() API.
+ * The Nvidia Encoding interface returns the number of codec GUIDs it has actually
+ * filled in the GUID array in the \p GUIDCount parameter.
+ *
+ * \param [in] encoder
+ *   Pointer to the NvEncodeAPI interface.
+ * \param [in] guidArraySize
+ *   Number of GUIDs to be retrieved. Should be set to the number retrieved using
+ *   ::NvEncGetEncodeGUIDCount.
+ * \param [out] GUIDs
+ *   Array of supported Encode GUIDs.
+ * \param [out] GUIDCount
+ *   Number of supported Encode GUIDs.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncGetEncodeGUIDs (void* encoder, GUID* GUIDs, uint32_t guidArraySize, uint32_t* GUIDCount);
+
+
+// NvEncGetEncodeProfileGUIDCount
+/**
+ * \brief Retrieves the number of supported profile GUIDs.
+ *
+ * The function returns the number of profile GUIDs supported for a given codec.
+ * The client must first enumerate the codec GUIDs supported by the NvEncodeAPI
+ * interface. After determining the codec GUID, it can query the NvEncodeAPI
+ * interface to determine the number of profile GUIDs supported for a particular
+ * codec GUID.
+ *
+ * \param [in] encoder
+ *   Pointer to the NvEncodeAPI interface.
+ * \param [in] encodeGUID
+ *   The codec GUID for which the profile GUIDs are being enumerated.
+ * \param [out] encodeProfileGUIDCount
+ *   Number of encode profiles supported for the given encodeGUID.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncGetEncodeProfileGUIDCount (void* encoder, GUID encodeGUID, uint32_t* encodeProfileGUIDCount);
+
+
+// NvEncGetEncodeProfileGUIDs
+/**
+ * \brief Retrieves an array of supported encode profile GUIDs.
+ *
+ * The function returns an array of supported profile GUIDs for a particular
+ * codec GUID. The client must allocate an array where the NvEncodeAPI interface
+ * can populate the profile GUIDs. The client can determine the array size using
+ * ::NvEncGetEncodeProfileGUIDCount() API. The client must also validate that the
+ * NvEncodeAPI interface supports the GUID the client wants to pass as \p encodeGUID
+ * parameter.
+ *
+ * \param [in] encoder
+ *   Pointer to the NvEncodeAPI interface.
+ * \param [in] encodeGUID
+ *   The encode GUID whose profile GUIDs are being enumerated.
+ * \param [in] guidArraySize
+ *   Number of GUIDs to be retrieved. Should be set to the number retrieved using
+ *   ::NvEncGetEncodeProfileGUIDCount.
+ * \param [out] profileGUIDs
+ *   Array of supported Encode Profile GUIDs.
+ * \param [out] GUIDCount
+ *   Number of valid encode profile GUIDs in \p profileGUIDs array.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncGetEncodeProfileGUIDs (void* encoder, GUID encodeGUID, GUID* profileGUIDs, uint32_t guidArraySize, uint32_t* GUIDCount);
+
+// NvEncGetInputFormatCount
+/**
+ * \brief Retrieves the number of supported Input formats.
+ *
+ * The function returns the number of supported input formats. The client must
+ * query the NvEncodeAPI interface to determine the supported input formats
+ * before creating the input surfaces.
+ *
+ * \param [in] encoder
+ *   Pointer to the NvEncodeAPI interface.
+ * \param [in] encodeGUID
+ *   Encode GUID, corresponding to which the number of supported input formats
+ *   is to be retrieved.
+ * \param [out] inputFmtCount
+ *   Number of input formats supported for specified Encode GUID.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_GENERIC \n
+ */
+NVENCSTATUS NVENCAPI NvEncGetInputFormatCount (void* encoder, GUID encodeGUID, uint32_t* inputFmtCount);
+
+
+// NvEncGetInputFormats
+/**
+ * \brief Retrieves an array of supported Input formats
+ *
+ * Returns an array of supported input formats. The client must use the input
+ * format to create an input surface using ::NvEncCreateInputBuffer() API.
+ *
+ * \param [in] encoder
+ *   Pointer to the NvEncodeAPI interface.
+ * \param [in] encodeGUID
+ *   Encode GUID, corresponding to which the number of supported input formats
+ *   is to be retrieved.
+ * \param [in] inputFmtArraySize
+ *   Size of the input format array passed in \p inputFmts.
+ * \param [out] inputFmts
+ *   Array of input formats supported for this Encode GUID.
+ * \param [out] inputFmtCount
+ *   The number of valid input format types returned by the NvEncodeAPI
+ *   interface in \p inputFmts array.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncGetInputFormats (void* encoder, GUID encodeGUID, NV_ENC_BUFFER_FORMAT* inputFmts, uint32_t inputFmtArraySize, uint32_t* inputFmtCount);
+
+
+// NvEncGetEncodeCaps
+/**
+ * \brief Retrieves the capability value for a specified encoder attribute.
+ *
+ * The function returns the capability value for a given encoder attribute. The
+ * client must validate the encodeGUID using ::NvEncGetEncodeGUIDs() API before
+ * calling this function. The encoder attributes being queried are enumerated in
+ * the ::NV_ENC_CAPS_PARAM enum.
+ *
+ * \param [in] encoder
+ *   Pointer to the NvEncodeAPI interface.
+ * \param [in] encodeGUID
+ *   Encode GUID, corresponding to which the capability attribute is to be retrieved.
+ * \param [in] capsParam
+ *   Used to specify the attribute being queried. Refer ::NV_ENC_CAPS_PARAM for more
+ *   details.
+ * \param [out] capsVal
+ *   The value corresponding to the capability attribute being queried.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_GENERIC \n
+ */
+NVENCSTATUS NVENCAPI NvEncGetEncodeCaps (void* encoder, GUID encodeGUID, NV_ENC_CAPS_PARAM* capsParam, int* capsVal);
+
+
+// NvEncGetEncodePresetCount
+/**
+ * \brief Retrieves the number of supported preset GUIDs.
+ *
+ * The function returns the number of preset GUIDs available for a given codec.
+ * The client must validate the codec GUID using ::NvEncGetEncodeGUIDs() API
+ * before calling this function.
+ *
+ * \param [in] encoder
+ *   Pointer to the NvEncodeAPI interface.
+ * \param [in] encodeGUID
+ *   Encode GUID, corresponding to which the number of supported presets is to
+ *   be retrieved.
+ * \param [out] encodePresetGUIDCount
+ *   Receives the number of supported preset GUIDs.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncGetEncodePresetCount (void* encoder, GUID encodeGUID, uint32_t* encodePresetGUIDCount);
+
+
+// NvEncGetEncodePresetGUIDs
+/**
+ * \brief Retrieves an array of supported encoder preset GUIDs.
+ *
+ * The function returns an array of encode preset GUIDs available for a given codec.
+ * The client can directly use one of the preset GUIDs based upon the use case
+ * or target device. The preset GUID chosen can be directly used in
+ * NV_ENC_INITIALIZE_PARAMS::presetGUID parameter to ::NvEncEncodePicture() API.
+ * Alternatively, the client can also use the preset GUID to retrieve the encoding config
+ * parameters being used by NvEncodeAPI interface for that given preset, using
+ * ::NvEncGetEncodePresetConfig() API. It can then modify preset config parameters
+ * as per its use case and send it to NvEncodeAPI interface as part of
+ * NV_ENC_INITIALIZE_PARAMS::encodeConfig parameter for NvEncInitializeEncoder()
+ * API.
+ *
+ * \param [in] encoder
+ *   Pointer to the NvEncodeAPI interface.
+ * \param [in] encodeGUID
+ *   Encode GUID, corresponding to which the list of supported presets is to be
+ *   retrieved.
+ * \param [in] guidArraySize
+ *   Size of the array of preset GUIDs passed in \p presetGUIDs.
+ * \param [out] presetGUIDs
+ *   Array of supported Encode preset GUIDs from the NvEncodeAPI interface
+ *   to client.
+ * \param [out] encodePresetGUIDCount
+ *   Receives the number of preset GUIDs returned by the NvEncodeAPI
+ *   interface.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncGetEncodePresetGUIDs (void* encoder, GUID encodeGUID, GUID* presetGUIDs, uint32_t guidArraySize, uint32_t* encodePresetGUIDCount);
+
+
+// NvEncGetEncodePresetConfig
+/**
+ * \brief Returns a preset config structure supported for given preset GUID.
+ *
+ * The function returns a preset config structure for a given preset GUID. Before
+ * using this function the client must enumerate the preset GUIDs available for
+ * a given codec. The preset config structure can be modified by the client depending
+ * upon its use case and can be then used to initialize the encoder using
+ * ::NvEncInitializeEncoder() API. The client can use this function only if it
+ * wants to modify the NvEncodeAPI preset configuration, otherwise it can
+ * directly use the preset GUID.
+ *
+ * \param [in] encoder
+ *   Pointer to the NvEncodeAPI interface.
+ * \param [in] encodeGUID
+ *   Encode GUID, corresponding to which the list of supported presets is to be
+ *   retrieved.
+ * \param [in] presetGUID
+ *   Preset GUID, corresponding to which the encoding configuration is to be
+ *   retrieved.
+ * \param [out] presetConfig
+ *   The requested Preset Encoder Attribute set. Refer ::_NV_ENC_CONFIG for
+ *   more details.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncGetEncodePresetConfig (void* encoder, GUID encodeGUID, GUID presetGUID, NV_ENC_PRESET_CONFIG* presetConfig);
+
+// NvEncGetEncodePresetConfigEx
+/**
+ * \brief Returns a preset config structure supported for given preset GUID.
+ *
+ * The function returns a preset config structure for a given preset GUID and tuning info.
+ * NvEncGetEncodePresetConfigEx() API is not applicable to H264 and HEVC ME-only mode.
+ * Before using this function the client must enumerate the preset GUIDs available for
+ * a given codec. The preset config structure can be modified by the client depending
+ * upon its use case and can be then used to initialize the encoder using
+ * ::NvEncInitializeEncoder() API. The client can use this function only if it
+ * wants to modify the NvEncodeAPI preset configuration, otherwise it can
+ * directly use the preset GUID.
+ *
+ * \param [in] encoder
+ *   Pointer to the NvEncodeAPI interface.
+ * \param [in] encodeGUID
+ *   Encode GUID, corresponding to which the list of supported presets is to be
+ *   retrieved.
+ * \param [in] presetGUID
+ *   Preset GUID, corresponding to which the encoding configuration is to be
+ *   retrieved.
+ * \param [in] tuningInfo
+ *   Tuning info, corresponding to which the encoding configuration is to be
+ *   retrieved.
+ * \param [out] presetConfig
+ *   The requested Preset Encoder Attribute set. Refer ::_NV_ENC_CONFIG for
+ *   more details.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncGetEncodePresetConfigEx (void* encoder, GUID encodeGUID, GUID presetGUID, NV_ENC_TUNING_INFO tuningInfo, NV_ENC_PRESET_CONFIG* presetConfig);
+
+// NvEncInitializeEncoder
+/**
+ * \brief Initialize the encoder.
+ *
+ * This API must be used to initialize the encoder. The initialization parameter
+ * is passed using \p *createEncodeParams. The client must send the following
+ * fields of the _NV_ENC_INITIALIZE_PARAMS structure with a valid value.
+ * - NV_ENC_INITIALIZE_PARAMS::encodeGUID
+ * - NV_ENC_INITIALIZE_PARAMS::encodeWidth
+ * - NV_ENC_INITIALIZE_PARAMS::encodeHeight
+ *
+ * The client can pass a preset GUID directly to the NvEncodeAPI interface using
+ * NV_ENC_INITIALIZE_PARAMS::presetGUID field. If the client doesn't pass
+ * NV_ENC_INITIALIZE_PARAMS::encodeConfig structure, the codec specific parameters
+ * will be selected based on the preset GUID. The preset GUID must have been
+ * validated by the client using ::NvEncGetEncodePresetGUIDs() API.
+ * If the client passes a custom ::_NV_ENC_CONFIG structure through
+ * NV_ENC_INITIALIZE_PARAMS::encodeConfig, it will override the codec specific parameters
+ * based on the preset GUID. It is recommended that even if the client passes a custom config,
+ * it should also send a preset GUID. In this case, the preset GUID passed by the client
+ * will not override any of the custom config parameters programmed by the client,
+ * it is only used as a hint by the NvEncodeAPI interface to determine certain encoder parameters
+ * which are not exposed to the client.
+ *
+ * There are two modes of operation for the encoder namely:
+ * - Asynchronous mode
+ * - Synchronous mode
+ *
+ * The client can select asynchronous or synchronous mode by setting the \p
+ * enableEncodeAsync field in ::_NV_ENC_INITIALIZE_PARAMS to 1 or 0 respectively.
+ *\par Asynchronous mode of operation:
+ * The Asynchronous mode can be enabled by setting NV_ENC_INITIALIZE_PARAMS::enableEncodeAsync to 1.
+ * The client operating in asynchronous mode must allocate a completion event object
+ * for each output buffer and pass the completion event object in the
+ * ::NvEncEncodePicture() API. The client can create another thread and wait on
+ * the event object to be signaled by NvEncodeAPI interface on completion of the
+ * encoding process for the output frame. This should unblock the main thread from
+ * submitting work to the encoder. When the event is signaled the client can call
+ * NvEncodeAPI interfaces to copy the bitstream data using ::NvEncLockBitstream()
+ * API. This is the preferred mode of operation.
+ *
+ * NOTE: Asynchronous mode is not supported on Linux.
+ *
+ *\par Synchronous mode of operation:
+ * The client can select synchronous mode by setting NV_ENC_INITIALIZE_PARAMS::enableEncodeAsync to 0.
+ * The client working in synchronous mode can work in a single threaded or multi
+ * threaded mode. The client need not allocate any event objects. The client can
+ * only lock the bitstream data after NvEncodeAPI interface has returned
+ * ::NV_ENC_SUCCESS from encode picture. The NvEncodeAPI interface can return
+ * ::NV_ENC_ERR_NEED_MORE_INPUT error code from ::NvEncEncodePicture() API. The
+ * client must not lock the output buffer in such case but should send the next
+ * frame for encoding. The client must keep on calling ::NvEncEncodePicture() API
+ * until it returns ::NV_ENC_SUCCESS. \n
+ * The client must always lock the bitstream data in the order in which it was submitted.
+ * This is true for both asynchronous and synchronous mode.
+ *
+ *\par Picture type decision:
+ * If the client is taking the picture type decision, it must disable the picture
+ * type decision module in NvEncodeAPI by setting NV_ENC_INITIALIZE_PARAMS::enablePTD
+ * to 0. In this case the client is required to send the picture in encoding
+ * order to NvEncodeAPI by doing the re-ordering for B frames.
\n
+ * If the client doesn't want to take the picture type decision, it can enable the
+ * picture type decision module in the NvEncodeAPI interface by setting
+ * NV_ENC_INITIALIZE_PARAMS::enablePTD to 1 and send the input pictures in display
+ * order.
+ *
+ * \param [in] encoder
+ *   Pointer to the NvEncodeAPI interface.
+ * \param [in] createEncodeParams
+ *   Refer ::_NV_ENC_INITIALIZE_PARAMS for details.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncInitializeEncoder (void* encoder, NV_ENC_INITIALIZE_PARAMS* createEncodeParams);
+
+
+// NvEncCreateInputBuffer
+/**
+ * \brief Allocates Input buffer.
+ *
+ * This function is used to allocate an input buffer. The client must enumerate
+ * the input buffer format before allocating the input buffer resources. The
+ * NV_ENC_INPUT_PTR returned by the NvEncodeAPI interface in the
+ * NV_ENC_CREATE_INPUT_BUFFER::inputBuffer field can be directly used in
+ * ::NvEncEncodePicture() API. The number of input buffers to be allocated by the
+ * client must be at least 4 more than the number of B frames being used for encoding.
+ *
+ * \param [in] encoder
+ *   Pointer to the NvEncodeAPI interface.
+ * \param [in,out] createInputBufferParams
+ *   Pointer to the ::NV_ENC_CREATE_INPUT_BUFFER structure.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncCreateInputBuffer (void* encoder, NV_ENC_CREATE_INPUT_BUFFER* createInputBufferParams);
+
+
+// NvEncDestroyInputBuffer
+/**
+ * \brief Release an input buffer.
+ *
+ * This function is used to free an input buffer. If the client has allocated
+ * any input buffer using ::NvEncCreateInputBuffer() API, it must free those
+ * input buffers by calling this function. The client must release the input
+ * buffers before destroying the encoder using ::NvEncDestroyEncoder() API.
+ *
+ * \param [in] encoder
+ *   Pointer to the NvEncodeAPI interface.
+ * \param [in] inputBuffer
+ *   Pointer to the input buffer to be released.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncDestroyInputBuffer (void* encoder, NV_ENC_INPUT_PTR inputBuffer);
+
+// NvEncSetIOCudaStreams
+/**
+ * \brief Set input and output CUDA stream for specified encoder attribute.
+ *
+ * Encoding may involve CUDA pre-processing on the input and post-processing on encoded output.
+ * This function is used to set input and output CUDA streams to pipeline the CUDA pre-processing
+ * and post-processing tasks. Clients should call this function before the call to
+ * NvEncUnlockInputBuffer(). If this function is not called, the default CUDA stream is used for
+ * input and output processing. After a successful call to this function, the streams specified
+ * in that call will replace the previously-used streams.
+ * This API is supported for NVCUVID interface only.
+ *
+ * \param [in] encoder
+ *   Pointer to the NvEncodeAPI interface.
+ * \param [in] inputStream
+ *   Pointer to CUstream which is used to process ::NV_ENC_PIC_PARAMS::inputFrame for encode.
+ *   In case of ME-only mode, inputStream is used to process ::NV_ENC_MEONLY_PARAMS::inputBuffer and
+ *   ::NV_ENC_MEONLY_PARAMS::referenceFrame
+ * \param [in] outputStream
+ *   Pointer to CUstream which is used to process ::NV_ENC_PIC_PARAMS::outputBuffer for encode.
+ *   In case of ME-only mode, outputStream is used to process ::NV_ENC_MEONLY_PARAMS::mvBuffer
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_GENERIC \n
+ */
+NVENCSTATUS NVENCAPI NvEncSetIOCudaStreams (void* encoder, NV_ENC_CUSTREAM_PTR inputStream, NV_ENC_CUSTREAM_PTR outputStream);
+
+
+// NvEncCreateBitstreamBuffer
+/**
+ * \brief Allocates an output bitstream buffer
+ *
+ * This function is used to allocate an output bitstream buffer and returns a
+ * NV_ENC_OUTPUT_PTR to bitstream buffer to the client in the
+ * NV_ENC_CREATE_BITSTREAM_BUFFER::bitstreamBuffer field.
+ * The client can only call this function after the encoder session has been
+ * initialized using ::NvEncInitializeEncoder() API. The number of output
+ * buffers allocated by the client must be at least 4 more than the number of
+ * B frames being used for encoding. The client can only access the output
+ * bitstream data by locking the \p bitstreamBuffer using the ::NvEncLockBitstream()
+ * function.
+ *
+ * \param [in] encoder
+ *   Pointer to the NvEncodeAPI interface.
+ * \param [in,out] createBitstreamBufferParams
+ *   Pointer to the ::NV_ENC_CREATE_BITSTREAM_BUFFER structure.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncCreateBitstreamBuffer (void* encoder, NV_ENC_CREATE_BITSTREAM_BUFFER* createBitstreamBufferParams);
+
+
+// NvEncDestroyBitstreamBuffer
+/**
+ * \brief Release a bitstream buffer.
+ *
+ * This function is used to release the output bitstream buffer allocated using
+ * the ::NvEncCreateBitstreamBuffer() function. The client must release the output
+ * bitstreamBuffer using this function before destroying the encoder session.
+ *
+ * \param [in] encoder
+ *   Pointer to the NvEncodeAPI interface.
+ * \param [in] bitstreamBuffer
+ *   Pointer to the bitstream buffer being released.
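+ *
+ * A minimal allocation/release sketch (illustrative only, not part of the SDK;
+ * it assumes \p encoder is an encoder session already initialized with
+ * ::NvEncInitializeEncoder() and omits error handling):
+ *\code
+    NV_ENC_CREATE_BITSTREAM_BUFFER createParams = { 0 };
+    createParams.version = NV_ENC_CREATE_BITSTREAM_BUFFER_VER;
+    if (NvEncCreateBitstreamBuffer(encoder, &createParams) == NV_ENC_SUCCESS)
+    {
+        NV_ENC_OUTPUT_PTR bitstreamBuffer = createParams.bitstreamBuffer;
+        /* ... submit frames, then lock/unlock the buffer to read encoded data ... */
+        NvEncDestroyBitstreamBuffer(encoder, bitstreamBuffer);
+    }
+ * \endcode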
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncDestroyBitstreamBuffer (void* encoder, NV_ENC_OUTPUT_PTR bitstreamBuffer);
+
+// NvEncEncodePicture
+/**
+ * \brief Submit an input picture for encoding.
+ *
+ * This function is used to submit an input picture buffer for encoding. The
+ * encoding parameters are passed using \p *encodePicParams which is a pointer
+ * to the ::_NV_ENC_PIC_PARAMS structure.
+ *
+ * If the client has set NV_ENC_INITIALIZE_PARAMS::enablePTD to 0, then it must
+ * send a valid value for the following fields.
+ * - NV_ENC_PIC_PARAMS::pictureType
+ * - NV_ENC_PIC_PARAMS_H264::displayPOCSyntax (H264 only)
+ * - NV_ENC_PIC_PARAMS_H264::frameNumSyntax (H264 only)
+ * - NV_ENC_PIC_PARAMS_H264::refPicFlag (H264 only)
+ *
+ *\par MVC Encoding:
+ * For MVC encoding the client must call encode picture API for each view separately
+ * and must pass a valid view id in NV_ENC_PIC_PARAMS_MVC::viewID field. Currently
+ * NvEncodeAPI only supports stereo MVC, so the client must send viewID as 0 for the
+ * base view and viewID as 1 for the dependent view.
+ *
+ *\par Asynchronous Encoding
+ * If the client has enabled asynchronous mode of encoding by setting
+ * NV_ENC_INITIALIZE_PARAMS::enableEncodeAsync to 1 in the ::NvEncInitializeEncoder()
+ * API, then the client must send a valid NV_ENC_PIC_PARAMS::completionEvent.
+ * In case of asynchronous mode of operation, the client can queue the ::NvEncEncodePicture()
+ * API commands from the main thread and then queue output buffers to be processed
+ * to a secondary worker thread. Before locking the output buffers in the
+ * secondary thread, the client must wait on NV_ENC_PIC_PARAMS::completionEvent
+ * it has queued in ::NvEncEncodePicture() API call. The client must always process
+ * completion event and the output buffer in the same order in which they have been
+ * submitted for encoding. The NvEncodeAPI interface is responsible for any
+ * re-ordering required for B frames and will always ensure that encoded bitstream
+ * data is written in the same order in which output buffer is submitted.
+ * The NvEncodeAPI interface may return ::NV_ENC_ERR_NEED_MORE_INPUT error code for
+ * some ::NvEncEncodePicture() API calls but the client must not treat it as a fatal error.
+ * The NvEncodeAPI interface might not be able to submit an input picture buffer for encoding
+ * immediately due to re-ordering for B frames.
+ *\code
+  The below example shows how asynchronous encoding works in case of 1 B frame
+  ------------------------------------------------------------------------
+  Suppose the client allocated 4 input buffers(I1,I2..), 4 output buffers(O1,O2..)
+  and 4 completion events(E1, E2, ...). The NvEncodeAPI interface will need to
+  keep a copy of the input buffers for re-ordering and it allocates following
+  internal buffers (NvI1, NvI2...). These internal buffers are managed by NvEncodeAPI
+  and the client is not responsible for the allocating or freeing the memory of
+  the internal buffers.
+
+  a) The client main thread will queue the following encode frame calls.
+  Note the picture type is unknown to the client, the decision is being taken by
+  NvEncodeAPI interface. The client should pass ::_NV_ENC_PIC_PARAMS parameter
+  consisting of allocated input buffer, output buffer and output events in successive
+  ::NvEncEncodePicture() API calls along with other required encode picture params.
+  For example:
+  1st EncodePicture parameters - (I1, O1, E1)
+  2nd EncodePicture parameters - (I2, O2, E2)
+  3rd EncodePicture parameters - (I3, O3, E3)
+
+  b) NvEncodeAPI SW will receive the following encode Commands from the client.
+  The left side shows input from client in the form (Input buffer, Output Buffer,
+  Output Event). The right hand side shows a possible picture type decision taken by
+  the NvEncodeAPI interface.
+  (I1, O1, E1) ---P1 Frame
+  (I2, O2, E2) ---B2 Frame
+  (I3, O3, E3) ---P3 Frame
+
+  c) NvEncodeAPI interface will make a copy of the input buffers to its internal
+  buffers for re-ordering. These copies are done as part of nvEncEncodePicture
+  function call from the client and NvEncodeAPI interface is responsible for
+  synchronization of copy operation with the actual encoding operation.
+  I1 --> NvI1
+  I2 --> NvI2
+  I3 --> NvI3
+
+  d) The NvEncodeAPI encodes I1 as P frame and submits I1 to encoder HW and returns ::NV_ENC_SUCCESS.
+  The NvEncodeAPI tries to encode I2 as B frame and fails with ::NV_ENC_ERR_NEED_MORE_INPUT error code.
+  The error is not fatal and it notifies client that I2 is not submitted to encoder immediately.
+  The NvEncodeAPI encodes I3 as P frame and submits I3 for encoding which will be used as backward
+  reference frame for I2. The NvEncodeAPI then submits I2 for encoding and returns ::NV_ENC_SUCCESS.
+  Both the submissions are part of the same ::NvEncEncodePicture() function call.
+
+  e) After returning from ::NvEncEncodePicture() call, the client must queue the output
+  bitstream processing work to the secondary thread. The output bitstream processing
+  for asynchronous mode consists of first waiting on completion event(E1, E2..)
+  and then locking the output bitstream buffer(O1, O2..) for reading the encoded
+  data. The work queued to the secondary thread by the client is in the following order
+  (I1, O1, E1)
+  (I2, O2, E2)
+  (I3, O3, E3)
+  Note they are in the same order in which client calls ::NvEncEncodePicture() API
+  in step a).
+
+  f) NvEncodeAPI interface will do the re-ordering such that Encoder HW will receive
+  the following encode commands:
+  (NvI1, O1, E1) ---P1 Frame
+  (NvI3, O2, E2) ---P3 Frame
+  (NvI2, O3, E3) ---B2 frame
+
+  g) After the encoding operations are completed, the events will be signaled
+  by NvEncodeAPI interface in the following order:
+  (O1, E1) ---P1 Frame, output bitstream copied to O1 and event E1 signaled.
+  (O2, E2) ---P3 Frame, output bitstream copied to O2 and event E2 signaled.
+  (O3, E3) ---B2 Frame, output bitstream copied to O3 and event E3 signaled.
+
+  h) The client must lock the bitstream data using ::NvEncLockBitstream() API in
+  the order O1,O2,O3 to read the encoded data, after waiting for the events
+  to be signaled in the same order i.e. E1, E2 and E3. The output processing is
+  done in the secondary thread in the following order:
+  Waits on E1, copies encoded bitstream from O1
+  Waits on E2, copies encoded bitstream from O2
+  Waits on E3, copies encoded bitstream from O3
+
+  -Note the client will receive the events signaling and output buffer in the
+  same order in which they have been submitted for encoding.
+  -Note the LockBitstream will have picture type field which will notify the
+  output picture type to the clients.
+  -Note the input, output buffer and the output completion event are free to be
+  reused once NvEncodeAPI interface has signaled the event and the client has
+  copied the data from the output buffer.
+
+ * \endcode
+ *
+ *\par Synchronous Encoding
+ * The client can enable synchronous mode of encoding by setting
+ * NV_ENC_INITIALIZE_PARAMS::enableEncodeAsync to 0 in ::NvEncInitializeEncoder() API.
+ * The NvEncodeAPI interface may return ::NV_ENC_ERR_NEED_MORE_INPUT error code for
+ * some ::NvEncEncodePicture() API calls when NV_ENC_INITIALIZE_PARAMS::enablePTD
+ * is set to 1, but the client must not treat it as a fatal error. The NvEncodeAPI
+ * interface might not be able to submit an input picture buffer for encoding
+ * immediately due to re-ordering for B frames. The NvEncodeAPI interface cannot
+ * submit the input picture which is decided to be encoded as B frame as it waits
+ * for backward reference from temporally subsequent frames. This input picture
+ * is buffered internally and waits for more input pictures to arrive. The client
+ * must not call ::NvEncLockBitstream() API on the output buffers whose
+ * ::NvEncEncodePicture() API returns ::NV_ENC_ERR_NEED_MORE_INPUT. The client must
+ * wait for the NvEncodeAPI interface to return ::NV_ENC_SUCCESS before locking the
+ * output bitstreams to read the encoded bitstream data. The following example
+ * explains the scenario with synchronous encoding with 1 B frame.
+ *\code
+  The below example shows how synchronous encoding works in case of 1 B frame
+  -----------------------------------------------------------------------------
+  Suppose the client allocated 4 input buffers(I1,I2..), 4 output buffers(O1,O2..)
+  and 4 completion events(E1, E2, ...). The NvEncodeAPI interface will need to
+  keep a copy of the input buffers for re-ordering and it allocates following
+  internal buffers (NvI1, NvI2...). These internal buffers are managed by NvEncodeAPI
+  and the client is not responsible for the allocating or freeing the memory of
+  the internal buffers.
+
+  The client calls ::NvEncEncodePicture() API with input buffer I1 and output buffer O1.
+  The NvEncodeAPI decides to encode I1 as P frame and submits it to encoder
+  HW and returns ::NV_ENC_SUCCESS.
+  The client can now read the encoded data by locking the output O1 by calling
+  NvEncLockBitstream API.
+
+  The client calls ::NvEncEncodePicture() API with input buffer I2 and output buffer O2.
+  The NvEncodeAPI decides to encode I2 as B frame and buffers I2 by copying it
+  to internal buffer and returns ::NV_ENC_ERR_NEED_MORE_INPUT.
+  The error is not fatal and it notifies client that it cannot read the encoded
+  data by locking the output O2 by calling ::NvEncLockBitstream() API without submitting
+  more work to the NvEncodeAPI interface.
+
+  The client calls ::NvEncEncodePicture() with input buffer I3 and output buffer O3.
+  The NvEncodeAPI decides to encode I3 as P frame and it first submits I3 for
+  encoding which will be used as backward reference frame for I2.
+  The NvEncodeAPI then submits I2 for encoding and returns ::NV_ENC_SUCCESS. Both
+  the submissions are part of the same ::NvEncEncodePicture() function call.
+  The client can now read the encoded data for both the frames by locking the output
+  O2 followed by O3, by calling ::NvEncLockBitstream() API.
+
+  The client must always lock the outputs in the same order in which the inputs were
+  submitted, to receive the encoded bitstream in the correct encoding order.
+
+ * \endcode
+ *
+ * \param [in] encoder
+ *   Pointer to the NvEncodeAPI interface.
+ * \param [in,out] encodePicParams
+ *   Pointer to the ::_NV_ENC_PIC_PARAMS structure.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_ENCODER_BUSY \n
+ * ::NV_ENC_ERR_NEED_MORE_INPUT \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncEncodePicture (void* encoder, NV_ENC_PIC_PARAMS* encodePicParams);
+
+
+// NvEncLockBitstream
+/**
+ * \brief Lock output bitstream buffer
+ *
+ * This function is used to lock the bitstream buffer to read the encoded data.
+ * The client can only access the encoded data by calling this function.
+ * The pointer to client accessible encoded data is returned in the
+ * NV_ENC_LOCK_BITSTREAM::bitstreamBufferPtr field. The size of the encoded data
+ * in the output buffer is returned in the NV_ENC_LOCK_BITSTREAM::bitstreamSizeInBytes field.
+ * The NvEncodeAPI interface also returns the output picture type and picture structure
+ * of the encoded frame in NV_ENC_LOCK_BITSTREAM::pictureType and
+ * NV_ENC_LOCK_BITSTREAM::pictureStruct fields respectively. If the client has
+ * set NV_ENC_LOCK_BITSTREAM::doNotWait to 1, the function might return
+ * ::NV_ENC_ERR_LOCK_BUSY if the client is operating in synchronous mode. This is not
+ * a fatal failure if NV_ENC_LOCK_BITSTREAM::doNotWait is set to 1. In the above case the client can
+ * retry the function after a few milliseconds.
+ *
+ * \param [in] encoder
+ *   Pointer to the NvEncodeAPI interface.
+ * \param [in,out] lockBitstreamBufferParams
+ *   Pointer to the ::_NV_ENC_LOCK_BITSTREAM structure.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_LOCK_BUSY \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncLockBitstream (void* encoder, NV_ENC_LOCK_BITSTREAM* lockBitstreamBufferParams);
+
+
+// NvEncUnlockBitstream
+/**
+ * \brief Unlock the output bitstream buffer
+ *
+ * This function is used to unlock the output bitstream buffer after the client
+ * has read the encoded data from output buffer. The client must call this function
+ * to unlock the output buffer which it has previously locked using ::NvEncLockBitstream()
+ * function. Using a locked bitstream buffer in ::NvEncEncodePicture() API will cause
+ * the function to fail.
+ *
+ * \param [in] encoder
+ *   Pointer to the NvEncodeAPI interface.
+ * \param [in,out] bitstreamBuffer
+ *   Pointer to the bitstream buffer being unlocked.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncUnlockBitstream (void* encoder, NV_ENC_OUTPUT_PTR bitstreamBuffer);
+
+
+// NvEncLockInputBuffer
+/**
+ * \brief Locks an input buffer
+ *
+ * This function is used to lock the input buffer to load the uncompressed YUV
+ * pixel data into input buffer memory. The client must pass the NV_ENC_INPUT_PTR
+ * it had previously allocated using ::NvEncCreateInputBuffer() in the
+ * NV_ENC_LOCK_INPUT_BUFFER::inputBuffer field.
+ * The NvEncodeAPI interface returns a pointer to the client accessible input buffer
+ * memory in the NV_ENC_LOCK_INPUT_BUFFER::bufferDataPtr field.
+ *
+ * \param [in] encoder
+ *   Pointer to the NvEncodeAPI interface.
+ * \param [in,out] lockInputBufferParams
+ *   Pointer to the ::_NV_ENC_LOCK_INPUT_BUFFER structure
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_LOCK_BUSY \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncLockInputBuffer (void* encoder, NV_ENC_LOCK_INPUT_BUFFER* lockInputBufferParams);
+
+
+// NvEncUnlockInputBuffer
+/**
+ * \brief Unlocks the input buffer
+ *
+ * This function is used to unlock the input buffer memory previously locked for
+ * uploading YUV pixel data. The input buffer must be unlocked before being used
+ * again for encoding, otherwise NvEncodeAPI will fail the ::NvEncEncodePicture() call.
+ *
+ * \param [in] encoder
+ *   Pointer to the NvEncodeAPI interface.
+ * \param [in] inputBuffer
+ *   Pointer to the input buffer that is being unlocked.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncUnlockInputBuffer (void* encoder, NV_ENC_INPUT_PTR inputBuffer);
+
+
+// NvEncGetEncodeStats
+/**
+ * \brief Get encoding statistics.
+ *
+ * This function is used to retrieve the encoding statistics.
+ * This API is not supported when encode device type is CUDA.
+ *
+ * \param [in] encoder
+ *   Pointer to the NvEncodeAPI interface.
+ * \param [in,out] encodeStats
+ *   Pointer to the ::_NV_ENC_STAT structure.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncGetEncodeStats (void* encoder, NV_ENC_STAT* encodeStats);
+
+
+// NvEncGetSequenceParams
+/**
+ * \brief Get encoded sequence and picture header.
+ *
+ * This function can be used to retrieve the sequence and picture header out of
+ * band. The client must call this function only after the encoder has been
+ * initialized using ::NvEncInitializeEncoder() function. The client must
+ * allocate the memory where the NvEncodeAPI interface can copy the bitstream
+ * header and pass the pointer to the memory in NV_ENC_SEQUENCE_PARAM_PAYLOAD::spsppsBuffer.
+ * The size of buffer is passed in the field NV_ENC_SEQUENCE_PARAM_PAYLOAD::inBufferSize.
+ * The NvEncodeAPI interface will copy the bitstream header payload and returns
+ * the actual size of the bitstream header in the field
+ * NV_ENC_SEQUENCE_PARAM_PAYLOAD::outSPSPPSPayloadSize.
+ * The client must call ::NvEncGetSequenceParams() function from the same thread which is
+ * being used to call ::NvEncEncodePicture() function.
+ *
+ * \param [in] encoder
+ *   Pointer to the NvEncodeAPI interface.
+ * \param [in,out] sequenceParamPayload
+ *   Pointer to the ::_NV_ENC_SEQUENCE_PARAM_PAYLOAD structure.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncGetSequenceParams (void* encoder, NV_ENC_SEQUENCE_PARAM_PAYLOAD* sequenceParamPayload);
+
+// NvEncGetSequenceParamEx
+/**
+ * \brief Get sequence and picture header.
+ *
+ * This function can be used to retrieve the sequence and picture header out of band, even when
+ * encoder has not been initialized using ::NvEncInitializeEncoder() function.
+ * The client must allocate the memory where the NvEncodeAPI interface can copy the bitstream
+ * header and pass the pointer to the memory in NV_ENC_SEQUENCE_PARAM_PAYLOAD::spsppsBuffer.
+ * The size of buffer is passed in the field NV_ENC_SEQUENCE_PARAM_PAYLOAD::inBufferSize.
+ * If encoder has not been initialized using ::NvEncInitializeEncoder() function, client must
+ * send NV_ENC_INITIALIZE_PARAMS as input. The NV_ENC_INITIALIZE_PARAMS passed must be same as the
+ * one which will be used for initializing encoder using ::NvEncInitializeEncoder() function later.
+ * If encoder is already initialized using ::NvEncInitializeEncoder() function, the provided
+ * NV_ENC_INITIALIZE_PARAMS structure is ignored. The NvEncodeAPI interface will copy the bitstream
+ * header payload and returns the actual size of the bitstream header in the field
+ * NV_ENC_SEQUENCE_PARAM_PAYLOAD::outSPSPPSPayloadSize. The client must call ::NvEncGetSequenceParamEx()
+ * function from the same thread which is being used to call ::NvEncEncodePicture() function.
+ *
+ * \param [in] encoder
+ *   Pointer to the NvEncodeAPI interface.
+ * \param [in] encInitParams
+ *   Pointer to the _NV_ENC_INITIALIZE_PARAMS structure.
+ * \param [in,out] sequenceParamPayload
+ *   Pointer to the ::_NV_ENC_SEQUENCE_PARAM_PAYLOAD structure.
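+ *
+ * A minimal out-of-band header fetch sketch (illustrative only, not part of the
+ * SDK; \p initParams stands in for a hypothetical, fully populated
+ * NV_ENC_INITIALIZE_PARAMS instance and error handling is omitted):
+ *\code
+    unsigned char header[512];
+    uint32_t headerSize = 0;
+    NV_ENC_SEQUENCE_PARAM_PAYLOAD payload = { 0 };
+    payload.version = NV_ENC_SEQUENCE_PARAM_PAYLOAD_VER;
+    payload.spsppsBuffer = header;
+    payload.inBufferSize = sizeof(header);
+    payload.outSPSPPSPayloadSize = &headerSize;
+    /* initParams must match the parameters later passed to NvEncInitializeEncoder(). */
+    if (NvEncGetSequenceParamEx(encoder, &initParams, &payload) == NV_ENC_SUCCESS)
+    {
+        /* 'headerSize' bytes of SPS/PPS data are now available in 'header'. */
+    }
+ * \endcode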
+ * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncGetSequenceParamEx (void* encoder, NV_ENC_INITIALIZE_PARAMS* encInitParams, NV_ENC_SEQUENCE_PARAM_PAYLOAD* sequenceParamPayload); + +// NvEncRegisterAsyncEvent +/** + * \brief Register an event for notification of encoding completion. + * + * This function is used to register the completion event with the NvEncodeAPI + * interface. The event is required when the client has configured the encoder to + * work in asynchronous mode. In this mode the client needs to send a completion + * event with every output buffer. The NvEncodeAPI interface will signal the + * completion of the encoding process using this event. Only after the event is + * signaled can the client get the encoded data using the ::NvEncLockBitstream() function. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in] eventParams + * Pointer to the ::_NV_ENC_EVENT_PARAMS structure. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncRegisterAsyncEvent (void* encoder, NV_ENC_EVENT_PARAMS* eventParams); + + +// NvEncUnregisterAsyncEvent +/** + * \brief Unregister completion event. + * + * This function is used to unregister a completion event which has been previously + * registered using the ::NvEncRegisterAsyncEvent() function. The client must unregister + * all events before destroying the encoder using the ::NvEncDestroyEncoder() function. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in] eventParams + * Pointer to the ::_NV_ENC_EVENT_PARAMS structure. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncUnregisterAsyncEvent (void* encoder, NV_ENC_EVENT_PARAMS* eventParams); + + +// NvEncMapInputResource +/** + * \brief Map an externally created input resource pointer for encoding. + * + * Maps an externally allocated input resource (registered beforehand using the + * ::NvEncRegisterResource() API) and returns an NV_ENC_INPUT_PTR + * which can be used for encoding in the ::NvEncEncodePicture() function. The + * mapped resource is returned in the field NV_ENC_MAP_INPUT_RESOURCE::outputResourcePtr. + * The NvEncodeAPI interface also returns the buffer format of the mapped resource + * in the field NV_ENC_MAP_INPUT_RESOURCE::outbufferFmt. + * This function provides a synchronization guarantee that any graphics work submitted + * on the input buffer is completed before the buffer is used for encoding. This is + * also true for compute (i.e. CUDA) work, provided that the previous workload using + * the input resource was submitted to the default stream.
+ * The client should not access any input buffer while they are mapped by the encoder. + * For D3D12 interface type, this function does not provide synchronization guarantee. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in,out] mapInputResParams + * Pointer to the ::_NV_ENC_MAP_INPUT_RESOURCE structure. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n + * ::NV_ENC_ERR_RESOURCE_NOT_REGISTERED \n + * ::NV_ENC_ERR_MAP_FAILED \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncMapInputResource (void* encoder, NV_ENC_MAP_INPUT_RESOURCE* mapInputResParams); + + +// NvEncUnmapInputResource +/** + * \brief UnMaps a NV_ENC_INPUT_PTR which was mapped for encoding + * + * + * UnMaps an input buffer which was previously mapped using ::NvEncMapInputResource() + * API. The mapping created using ::NvEncMapInputResource() should be invalidated + * using this API before the external resource is destroyed by the client. The client + * must unmap the buffer after ::NvEncLockBitstream() API returns successfully for encode + * work submitted using the mapped input buffer. + * + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in] mappedInputBuffer + * Pointer to the NV_ENC_INPUT_PTR + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n + * ::NV_ENC_ERR_RESOURCE_NOT_REGISTERED \n + * ::NV_ENC_ERR_RESOURCE_NOT_MAPPED \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncUnmapInputResource (void* encoder, NV_ENC_INPUT_PTR mappedInputBuffer); + +// NvEncDestroyEncoder +/** + * \brief Destroy Encoding Session + * + * Destroys the encoder session previously created using ::NvEncOpenEncodeSession() + * function. The client must flush the encoder before freeing any resources. In order + * to flush the encoder the client must pass a NULL encode picture packet and either + * wait for the ::NvEncEncodePicture() function to return in synchronous mode or wait + * for the flush event to be signaled by the encoder in asynchronous mode. + * The client must free all the input and output resources created using the + * NvEncodeAPI interface before destroying the encoder. If the client is operating + * in asynchronous mode, it must also unregister the completion events previously + * registered. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncDestroyEncoder (void* encoder); + +// NvEncInvalidateRefFrames +/** + * \brief Invalidate reference frames + * + * Invalidates reference frame based on the time stamp provided by the client. 
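/* [Editorial sketch, not part of the SDK header: the flush-then-destroy
 * sequence described above, for synchronous mode. The "NULL encode picture
 * packet" is expressed as a picture carrying the NV_ENC_PIC_FLAG_EOS flag. */
static NVENCSTATUS flush_and_destroy(NV_ENCODE_API_FUNCTION_LIST *nvenc, void *encoder)
{
    NV_ENC_PIC_PARAMS pic = { 0 };
    pic.version = NV_ENC_PIC_PARAMS_VER;
    pic.encodePicFlags = NV_ENC_PIC_FLAG_EOS;  /* signal end of stream */
    NVENCSTATUS st = nvenc->nvEncEncodePicture(encoder, &pic);
    if (st != NV_ENC_SUCCESS)
        return st;
    /* All buffers created or registered through the API must be freed first. */
    return nvenc->nvEncDestroyEncoder(encoder);
}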
+ * The encoder marks any reference frames or any frames which have been reconstructed + * using the corrupt frame as invalid for motion estimation and uses older reference + * frames for motion estimation. The encoder forces the current frame to be encoded + * as an intra frame if no reference frames are left after the invalidation process. + * This is useful for error resiliency in low-latency applications. The client + * is recommended to set NV_ENC_CONFIG_H264::maxNumRefFrames to a large value so + * that the encoder can keep a backup of older reference frames in the DPB and can use them + * for motion estimation when the newer reference frames have been invalidated. + * This API can be called multiple times. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in] invalidRefFrameTimeStamp + * Timestamp of the reference frame(s) which need to be invalidated. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncInvalidateRefFrames(void* encoder, uint64_t invalidRefFrameTimeStamp); + +// NvEncOpenEncodeSessionEx +/** + * \brief Opens an encoding session. + * + * Opens an encoding session and returns a pointer to the encoder interface in + * the \p **encoder parameter. The client should start the encoding process by calling + * this API first. + * The client must pass a pointer to an IDirect3DDevice9 device or a CUDA context in the \p *device parameter. + * For the OpenGL interface, \p device must be NULL. An OpenGL context must be current when + * calling all NvEncodeAPI functions. + * If the creation of the encoder session fails, the client must call the ::NvEncDestroyEncoder API + * before exiting. + * + * \param [in] openSessionExParams + * Pointer to a ::NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS structure. + * \param [out] encoder + * Encode Session pointer to the NvEncodeAPI interface. + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_NO_ENCODE_DEVICE \n + * ::NV_ENC_ERR_UNSUPPORTED_DEVICE \n + * ::NV_ENC_ERR_INVALID_DEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncOpenEncodeSessionEx (NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS *openSessionExParams, void** encoder); + +// NvEncRegisterResource +/** + * \brief Registers a resource with the Nvidia Video Encoder Interface. + * + * Registers a resource with the Nvidia Video Encoder Interface for bookkeeping. + * The client is expected to pass the registered resource handle as well while calling the ::NvEncMapInputResource API. + * + * \param [in] encoder + * Pointer to the NVEncodeAPI interface.
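/* [Editorial sketch, not part of the SDK header: opening a session on a CUDA
 * context, the first call a client makes. CUcontext comes from cuda.h. */
static NVENCSTATUS open_session(NV_ENCODE_API_FUNCTION_LIST *nvenc,
                                CUcontext ctx, void **encoder)
{
    NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS params = { 0 };
    params.version = NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER;
    params.device = ctx;                         /* CUDA context as the device */
    params.deviceType = NV_ENC_DEVICE_TYPE_CUDA;
    params.apiVersion = NVENCAPI_VERSION;
    NVENCSTATUS st = nvenc->nvEncOpenEncodeSessionEx(&params, encoder);
    if (st != NV_ENC_SUCCESS)
        nvenc->nvEncDestroyEncoder(*encoder);    /* required on failure, per the doc above */
    return st;
}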
+ * + * \param [in] registerResParams + * Pointer to a ::_NV_ENC_REGISTER_RESOURCE structure + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n + * ::NV_ENC_ERR_RESOURCE_REGISTER_FAILED \n + * ::NV_ENC_ERR_GENERIC \n + * ::NV_ENC_ERR_UNIMPLEMENTED \n + * + */ +NVENCSTATUS NVENCAPI NvEncRegisterResource (void* encoder, NV_ENC_REGISTER_RESOURCE* registerResParams); + +// NvEncUnregisterResource +/** + * \brief Unregisters a resource previously registered with the Nvidia Video Encoder Interface. + * + * Unregisters a resource previously registered with the Nvidia Video Encoder Interface. + * The client is expected to unregister any resource that it has registered with the + * Nvidia Video Encoder Interface before destroying the resource. + * + * \param [in] encoder + * Pointer to the NVEncodeAPI interface. + * + * \param [in] registeredResource + * The registered resource pointer that was returned in ::NvEncRegisterResource. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n + * ::NV_ENC_ERR_RESOURCE_NOT_REGISTERED \n + * ::NV_ENC_ERR_GENERIC \n + * ::NV_ENC_ERR_UNIMPLEMENTED \n + * + */ +NVENCSTATUS NVENCAPI NvEncUnregisterResource (void* encoder, NV_ENC_REGISTERED_PTR registeredResource); + +// NvEncReconfigureEncoder +/** + * \brief Reconfigure an existing encoding session. + * + * Reconfigure an existing encoding session. + * The client should call this API to change/reconfigure the parameters passed during the + * NvEncInitializeEncoder API call. + * Currently, reconfiguration of the following is not supported: + * Change in GOP structure. + * Change in sync-async mode. + * Change in MaxWidth & MaxHeight. + * Change in PTD mode. + * + * Resolution change is possible only if maxEncodeWidth & maxEncodeHeight of NV_ENC_INITIALIZE_PARAMS + * are set while creating the encoder session. + * + * \param [in] encoder + * Pointer to the NVEncodeAPI interface. + * + * \param [in] reInitEncodeParams + * Pointer to a ::NV_ENC_RECONFIGURE_PARAMS structure. + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_NO_ENCODE_DEVICE \n + * ::NV_ENC_ERR_UNSUPPORTED_DEVICE \n + * ::NV_ENC_ERR_INVALID_DEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_GENERIC \n + * + */ +NVENCSTATUS NVENCAPI NvEncReconfigureEncoder (void *encoder, NV_ENC_RECONFIGURE_PARAMS* reInitEncodeParams); + + + +// NvEncCreateMVBuffer +/** + * \brief Allocates output MV buffer for ME only mode. + * + * This function is used to allocate an output MV buffer. The size of the mvBuffer is + * dependent on the frame height and width of the last ::NvEncCreateInputBuffer() call. + * The NV_ENC_OUTPUT_PTR returned by the NvEncodeAPI interface in the + * ::NV_ENC_CREATE_MV_BUFFER::mvBuffer field should be used in the + * ::NvEncRunMotionEstimationOnly() API. + * The client must lock ::NV_ENC_CREATE_MV_BUFFER::mvBuffer using the ::NvEncLockBitstream() API to get the motion vector data.
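/* [Editorial sketch, not part of the SDK header: an on-the-fly bitrate change,
 * one of the reconfigurations that is allowed. 'initParams' is the
 * NV_ENC_INITIALIZE_PARAMS the session was initialized with; its encodeConfig
 * pointer must still point to valid, writable memory. */
static NVENCSTATUS change_bitrate(NV_ENCODE_API_FUNCTION_LIST *nvenc, void *encoder,
                                  NV_ENC_INITIALIZE_PARAMS *initParams,
                                  uint32_t newAverageBitRate)
{
    NV_ENC_RECONFIGURE_PARAMS rp = { 0 };
    rp.version = NV_ENC_RECONFIGURE_PARAMS_VER;
    rp.reInitEncodeParams = *initParams;  /* keep every other parameter as-is */
    rp.reInitEncodeParams.encodeConfig->rcParams.averageBitRate = newAverageBitRate;
    rp.forceIDR = 1;                      /* apply the new rate from an IDR frame */
    return nvenc->nvEncReconfigureEncoder(encoder, &rp);
}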
+ * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in,out] createMVBufferParams + * Pointer to the ::NV_ENC_CREATE_MV_BUFFER structure. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_GENERIC \n + */ +NVENCSTATUS NVENCAPI NvEncCreateMVBuffer (void* encoder, NV_ENC_CREATE_MV_BUFFER* createMVBufferParams); + + +// NvEncDestroyMVBuffer +/** + * \brief Release an output MV buffer for ME only mode. + * + * This function is used to release the output MV buffer allocated using + * the ::NvEncCreateMVBuffer() function. The client must release the output + * mvBuffer using this function before destroying the encoder session. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in] mvBuffer + * Pointer to the mvBuffer being released. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n + * ::NV_ENC_ERR_GENERIC \n + */ +NVENCSTATUS NVENCAPI NvEncDestroyMVBuffer (void* encoder, NV_ENC_OUTPUT_PTR mvBuffer); + + +// NvEncRunMotionEstimationOnly +/** + * \brief Submit an input picture and reference frame for motion estimation in ME only mode. + * + * This function is used to submit the input frame and reference frame for motion + * estimation. The ME parameters are passed using *meOnlyParams which is a pointer + * to the ::_NV_ENC_MEONLY_PARAMS structure. + * The client must lock ::NV_ENC_CREATE_MV_BUFFER::mvBuffer using the ::NvEncLockBitstream() API + * to get the motion vector data. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * \param [in] meOnlyParams + * Pointer to the ::_NV_ENC_MEONLY_PARAMS structure. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n + * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n + * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n + * ::NV_ENC_ERR_OUT_OF_MEMORY \n + * ::NV_ENC_ERR_INVALID_PARAM \n + * ::NV_ENC_ERR_INVALID_VERSION \n + * ::NV_ENC_ERR_NEED_MORE_INPUT \n + * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n + * ::NV_ENC_ERR_GENERIC \n + */ +NVENCSTATUS NVENCAPI NvEncRunMotionEstimationOnly (void* encoder, NV_ENC_MEONLY_PARAMS* meOnlyParams); + +// NvEncodeAPIGetMaxSupportedVersion +/** + * \brief Get the largest NvEncodeAPI version supported by the driver. + * + * This function can be used by clients to determine if the driver supports + * the NvEncodeAPI header the application was compiled with. + * + * \param [out] version + * Pointer to the requested value. The 4 least significant bits in the returned value + * indicate the minor version and the rest of the bits indicate the major + * version of the largest supported version. + * + * \return + * ::NV_ENC_SUCCESS \n + * ::NV_ENC_ERR_INVALID_PTR \n + */ +NVENCSTATUS NVENCAPI NvEncodeAPIGetMaxSupportedVersion (uint32_t* version); + + +// NvEncGetLastErrorString +/** + * \brief Get the description of the last error reported by the API.
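/* [Editorial sketch, not part of the SDK header: the customary driver version
 * check before opening a session, using the 4-bit-minor / remaining-bits-major
 * encoding documented above. */
static int nvenc_driver_supported(void)
{
    uint32_t maxVersion = 0;
    if (NvEncodeAPIGetMaxSupportedVersion(&maxVersion) != NV_ENC_SUCCESS)
        return 0;
    uint32_t headerVersion = (NVENCAPI_MAJOR_VERSION << 4) | NVENCAPI_MINOR_VERSION;
    return maxVersion >= headerVersion;  /* 0 means the installed driver is too old */
}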
+ * + * This function returns a null-terminated string that can be used by clients to better understand the reason + * for failure of a previous API call. + * + * \param [in] encoder + * Pointer to the NvEncodeAPI interface. + * + * \return + * Pointer to buffer containing the details of the last error encountered by the API. + */ +const char * NVENCAPI NvEncGetLastErrorString (void* encoder); + + +/// \cond API PFN +/* + * Defines API function pointers + */ +typedef NVENCSTATUS (NVENCAPI* PNVENCOPENENCODESESSION) (void* device, uint32_t deviceType, void** encoder); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODEGUIDCOUNT) (void* encoder, uint32_t* encodeGUIDCount); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODEGUIDS) (void* encoder, GUID* GUIDs, uint32_t guidArraySize, uint32_t* GUIDCount); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODEPROFILEGUIDCOUNT) (void* encoder, GUID encodeGUID, uint32_t* encodeProfileGUIDCount); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODEPROFILEGUIDS) (void* encoder, GUID encodeGUID, GUID* profileGUIDs, uint32_t guidArraySize, uint32_t* GUIDCount); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETINPUTFORMATCOUNT) (void* encoder, GUID encodeGUID, uint32_t* inputFmtCount); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETINPUTFORMATS) (void* encoder, GUID encodeGUID, NV_ENC_BUFFER_FORMAT* inputFmts, uint32_t inputFmtArraySize, uint32_t* inputFmtCount); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODECAPS) (void* encoder, GUID encodeGUID, NV_ENC_CAPS_PARAM* capsParam, int* capsVal); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODEPRESETCOUNT) (void* encoder, GUID encodeGUID, uint32_t* encodePresetGUIDCount); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODEPRESETGUIDS) (void* encoder, GUID encodeGUID, GUID* presetGUIDs, uint32_t guidArraySize, uint32_t* encodePresetGUIDCount); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODEPRESETCONFIG) (void* encoder, GUID encodeGUID, GUID presetGUID, NV_ENC_PRESET_CONFIG* presetConfig); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODEPRESETCONFIGEX) (void* encoder, GUID encodeGUID, GUID presetGUID, NV_ENC_TUNING_INFO tuningInfo, NV_ENC_PRESET_CONFIG* presetConfig); +typedef NVENCSTATUS (NVENCAPI* PNVENCINITIALIZEENCODER) (void* encoder, NV_ENC_INITIALIZE_PARAMS* createEncodeParams); +typedef NVENCSTATUS (NVENCAPI* PNVENCCREATEINPUTBUFFER) (void* encoder, NV_ENC_CREATE_INPUT_BUFFER* createInputBufferParams); +typedef NVENCSTATUS (NVENCAPI* PNVENCDESTROYINPUTBUFFER) (void* encoder, NV_ENC_INPUT_PTR inputBuffer); +typedef NVENCSTATUS (NVENCAPI* PNVENCCREATEBITSTREAMBUFFER) (void* encoder, NV_ENC_CREATE_BITSTREAM_BUFFER* createBitstreamBufferParams); +typedef NVENCSTATUS (NVENCAPI* PNVENCDESTROYBITSTREAMBUFFER) (void* encoder, NV_ENC_OUTPUT_PTR bitstreamBuffer); +typedef NVENCSTATUS (NVENCAPI* PNVENCENCODEPICTURE) (void* encoder, NV_ENC_PIC_PARAMS* encodePicParams); +typedef NVENCSTATUS (NVENCAPI* PNVENCLOCKBITSTREAM) (void* encoder, NV_ENC_LOCK_BITSTREAM* lockBitstreamBufferParams); +typedef NVENCSTATUS (NVENCAPI* PNVENCUNLOCKBITSTREAM) (void* encoder, NV_ENC_OUTPUT_PTR bitstreamBuffer); +typedef NVENCSTATUS (NVENCAPI* PNVENCLOCKINPUTBUFFER) (void* encoder, NV_ENC_LOCK_INPUT_BUFFER* lockInputBufferParams); +typedef NVENCSTATUS (NVENCAPI* PNVENCUNLOCKINPUTBUFFER) (void* encoder, NV_ENC_INPUT_PTR inputBuffer); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODESTATS) (void* encoder, NV_ENC_STAT* encodeStats); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETSEQUENCEPARAMS) (void* encoder, NV_ENC_SEQUENCE_PARAM_PAYLOAD* sequenceParamPayload); +typedef NVENCSTATUS 
(NVENCAPI* PNVENCREGISTERASYNCEVENT) (void* encoder, NV_ENC_EVENT_PARAMS* eventParams); +typedef NVENCSTATUS (NVENCAPI* PNVENCUNREGISTERASYNCEVENT) (void* encoder, NV_ENC_EVENT_PARAMS* eventParams); +typedef NVENCSTATUS (NVENCAPI* PNVENCMAPINPUTRESOURCE) (void* encoder, NV_ENC_MAP_INPUT_RESOURCE* mapInputResParams); +typedef NVENCSTATUS (NVENCAPI* PNVENCUNMAPINPUTRESOURCE) (void* encoder, NV_ENC_INPUT_PTR mappedInputBuffer); +typedef NVENCSTATUS (NVENCAPI* PNVENCDESTROYENCODER) (void* encoder); +typedef NVENCSTATUS (NVENCAPI* PNVENCINVALIDATEREFFRAMES) (void* encoder, uint64_t invalidRefFrameTimeStamp); +typedef NVENCSTATUS (NVENCAPI* PNVENCOPENENCODESESSIONEX) (NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS *openSessionExParams, void** encoder); +typedef NVENCSTATUS (NVENCAPI* PNVENCREGISTERRESOURCE) (void* encoder, NV_ENC_REGISTER_RESOURCE* registerResParams); +typedef NVENCSTATUS (NVENCAPI* PNVENCUNREGISTERRESOURCE) (void* encoder, NV_ENC_REGISTERED_PTR registeredRes); +typedef NVENCSTATUS (NVENCAPI* PNVENCRECONFIGUREENCODER) (void* encoder, NV_ENC_RECONFIGURE_PARAMS* reInitEncodeParams); + +typedef NVENCSTATUS (NVENCAPI* PNVENCCREATEMVBUFFER) (void* encoder, NV_ENC_CREATE_MV_BUFFER* createMVBufferParams); +typedef NVENCSTATUS (NVENCAPI* PNVENCDESTROYMVBUFFER) (void* encoder, NV_ENC_OUTPUT_PTR mvBuffer); +typedef NVENCSTATUS (NVENCAPI* PNVENCRUNMOTIONESTIMATIONONLY) (void* encoder, NV_ENC_MEONLY_PARAMS* meOnlyParams); +typedef const char * (NVENCAPI* PNVENCGETLASTERROR) (void* encoder); +typedef NVENCSTATUS (NVENCAPI* PNVENCSETIOCUDASTREAMS) (void* encoder, NV_ENC_CUSTREAM_PTR inputStream, NV_ENC_CUSTREAM_PTR outputStream); +typedef NVENCSTATUS (NVENCAPI* PNVENCGETSEQUENCEPARAMEX) (void* encoder, NV_ENC_INITIALIZE_PARAMS* encInitParams, NV_ENC_SEQUENCE_PARAM_PAYLOAD* sequenceParamPayload); + + +/// \endcond + + +/** @} */ /* END ENCODE_FUNC */ + +/** + * \ingroup ENCODER_STRUCTURE + * NV_ENCODE_API_FUNCTION_LIST + */ +typedef struct _NV_ENCODE_API_FUNCTION_LIST +{ + uint32_t version; /**< [in]: Client should pass NV_ENCODE_API_FUNCTION_LIST_VER. */ + uint32_t reserved; /**< [in]: Reserved and should be set to 0. */ + PNVENCOPENENCODESESSION nvEncOpenEncodeSession; /**< [out]: Client should access ::NvEncOpenEncodeSession() API through this pointer. */ + PNVENCGETENCODEGUIDCOUNT nvEncGetEncodeGUIDCount; /**< [out]: Client should access ::NvEncGetEncodeGUIDCount() API through this pointer. */ + PNVENCGETENCODEPRESETCOUNT nvEncGetEncodeProfileGUIDCount; /**< [out]: Client should access ::NvEncGetEncodeProfileGUIDCount() API through this pointer.*/ + PNVENCGETENCODEPRESETGUIDS nvEncGetEncodeProfileGUIDs; /**< [out]: Client should access ::NvEncGetEncodeProfileGUIDs() API through this pointer. */ + PNVENCGETENCODEGUIDS nvEncGetEncodeGUIDs; /**< [out]: Client should access ::NvEncGetEncodeGUIDs() API through this pointer. */ + PNVENCGETINPUTFORMATCOUNT nvEncGetInputFormatCount; /**< [out]: Client should access ::NvEncGetInputFormatCount() API through this pointer. */ + PNVENCGETINPUTFORMATS nvEncGetInputFormats; /**< [out]: Client should access ::NvEncGetInputFormats() API through this pointer. */ + PNVENCGETENCODECAPS nvEncGetEncodeCaps; /**< [out]: Client should access ::NvEncGetEncodeCaps() API through this pointer. */ + PNVENCGETENCODEPRESETCOUNT nvEncGetEncodePresetCount; /**< [out]: Client should access ::NvEncGetEncodePresetCount() API through this pointer. 
*/ + PNVENCGETENCODEPRESETGUIDS nvEncGetEncodePresetGUIDs; /**< [out]: Client should access ::NvEncGetEncodePresetGUIDs() API through this pointer. */ + PNVENCGETENCODEPRESETCONFIG nvEncGetEncodePresetConfig; /**< [out]: Client should access ::NvEncGetEncodePresetConfig() API through this pointer. */ + PNVENCINITIALIZEENCODER nvEncInitializeEncoder; /**< [out]: Client should access ::NvEncInitializeEncoder() API through this pointer. */ + PNVENCCREATEINPUTBUFFER nvEncCreateInputBuffer; /**< [out]: Client should access ::NvEncCreateInputBuffer() API through this pointer. */ + PNVENCDESTROYINPUTBUFFER nvEncDestroyInputBuffer; /**< [out]: Client should access ::NvEncDestroyInputBuffer() API through this pointer. */ + PNVENCCREATEBITSTREAMBUFFER nvEncCreateBitstreamBuffer; /**< [out]: Client should access ::NvEncCreateBitstreamBuffer() API through this pointer. */ + PNVENCDESTROYBITSTREAMBUFFER nvEncDestroyBitstreamBuffer; /**< [out]: Client should access ::NvEncDestroyBitstreamBuffer() API through this pointer. */ + PNVENCENCODEPICTURE nvEncEncodePicture; /**< [out]: Client should access ::NvEncEncodePicture() API through this pointer. */ + PNVENCLOCKBITSTREAM nvEncLockBitstream; /**< [out]: Client should access ::NvEncLockBitstream() API through this pointer. */ + PNVENCUNLOCKBITSTREAM nvEncUnlockBitstream; /**< [out]: Client should access ::NvEncUnlockBitstream() API through this pointer. */ + PNVENCLOCKINPUTBUFFER nvEncLockInputBuffer; /**< [out]: Client should access ::NvEncLockInputBuffer() API through this pointer. */ + PNVENCUNLOCKINPUTBUFFER nvEncUnlockInputBuffer; /**< [out]: Client should access ::NvEncUnlockInputBuffer() API through this pointer. */ + PNVENCGETENCODESTATS nvEncGetEncodeStats; /**< [out]: Client should access ::NvEncGetEncodeStats() API through this pointer. */ + PNVENCGETSEQUENCEPARAMS nvEncGetSequenceParams; /**< [out]: Client should access ::NvEncGetSequenceParams() API through this pointer. */ + PNVENCREGISTERASYNCEVENT nvEncRegisterAsyncEvent; /**< [out]: Client should access ::NvEncRegisterAsyncEvent() API through this pointer. */ + PNVENCUNREGISTERASYNCEVENT nvEncUnregisterAsyncEvent; /**< [out]: Client should access ::NvEncUnregisterAsyncEvent() API through this pointer. */ + PNVENCMAPINPUTRESOURCE nvEncMapInputResource; /**< [out]: Client should access ::NvEncMapInputResource() API through this pointer. */ + PNVENCUNMAPINPUTRESOURCE nvEncUnmapInputResource; /**< [out]: Client should access ::NvEncUnmapInputResource() API through this pointer. */ + PNVENCDESTROYENCODER nvEncDestroyEncoder; /**< [out]: Client should access ::NvEncDestroyEncoder() API through this pointer. */ + PNVENCINVALIDATEREFFRAMES nvEncInvalidateRefFrames; /**< [out]: Client should access ::NvEncInvalidateRefFrames() API through this pointer. */ + PNVENCOPENENCODESESSIONEX nvEncOpenEncodeSessionEx; /**< [out]: Client should access ::NvEncOpenEncodeSession() API through this pointer. */ + PNVENCREGISTERRESOURCE nvEncRegisterResource; /**< [out]: Client should access ::NvEncRegisterResource() API through this pointer. */ + PNVENCUNREGISTERRESOURCE nvEncUnregisterResource; /**< [out]: Client should access ::NvEncUnregisterResource() API through this pointer. */ + PNVENCRECONFIGUREENCODER nvEncReconfigureEncoder; /**< [out]: Client should access ::NvEncReconfigureEncoder() API through this pointer. */ + void* reserved1; + PNVENCCREATEMVBUFFER nvEncCreateMVBuffer; /**< [out]: Client should access ::NvEncCreateMVBuffer API through this pointer. 
*/ + PNVENCDESTROYMVBUFFER nvEncDestroyMVBuffer; /**< [out]: Client should access ::NvEncDestroyMVBuffer API through this pointer. */ + PNVENCRUNMOTIONESTIMATIONONLY nvEncRunMotionEstimationOnly; /**< [out]: Client should access ::NvEncRunMotionEstimationOnly API through this pointer. */ + PNVENCGETLASTERROR nvEncGetLastErrorString; /**< [out]: Client should access ::nvEncGetLastErrorString API through this pointer. */ + PNVENCSETIOCUDASTREAMS nvEncSetIOCudaStreams; /**< [out]: Client should access ::nvEncSetIOCudaStreams API through this pointer. */ + PNVENCGETENCODEPRESETCONFIGEX nvEncGetEncodePresetConfigEx; /**< [out]: Client should access ::NvEncGetEncodePresetConfigEx() API through this pointer. */ + PNVENCGETSEQUENCEPARAMEX nvEncGetSequenceParamEx; /**< [out]: Client should access ::NvEncGetSequenceParamEx() API through this pointer. */ + void* reserved2[277]; /**< [in]: Reserved and must be set to NULL */ +} NV_ENCODE_API_FUNCTION_LIST; + +/** Macro for constructing the version field of ::_NV_ENCODEAPI_FUNCTION_LIST. */ +#define NV_ENCODE_API_FUNCTION_LIST_VER NVENCAPI_STRUCT_VERSION(2) + +// NvEncodeAPICreateInstance +/** + * \ingroup ENCODE_FUNC + * Entry Point to the NvEncodeAPI interface. + * + * Creates an instance of the NvEncodeAPI interface, and populates the + * pFunctionList with function pointers to the API routines implemented by the + * NvEncodeAPI interface. + * + * \param [out] functionList + * + * \return + * ::NV_ENC_SUCCESS + * ::NV_ENC_ERR_INVALID_PTR + */ +NVENCSTATUS NVENCAPI NvEncodeAPICreateInstance(NV_ENCODE_API_FUNCTION_LIST *functionList); + +#ifdef __cplusplus +} +#endif + + +#endif + diff --git a/extern/nvidia/Video_Codec_SDK_11.1.5/Interface/nvcuvid.h b/extern/nvidia/Video_Codec_SDK_11.1.5/Interface/nvcuvid.h new file mode 100644 index 00000000000..d2ef3bc0c3d --- /dev/null +++ b/extern/nvidia/Video_Codec_SDK_11.1.5/Interface/nvcuvid.h @@ -0,0 +1,436 @@ +/* + * This copyright notice applies to this header file only: + * + * Copyright (c) 2010-2021 NVIDIA Corporation + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the software, and to permit persons to whom the + * software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/********************************************************************************************************************/ +//! \file nvcuvid.h +//! NVDECODE API provides video decoding interface to NVIDIA GPU devices. +//! \date 2015-2020 +//! This file contains the interface constants, structure definitions and function prototypes. 
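/* [Editorial sketch, not part of the SDK headers: bootstrapping the encode API
 * declared above. NvEncodeAPICreateInstance() fills the function list, and
 * every subsequent call goes through those pointers. */
#include <string.h> /* memset */
static NVENCSTATUS nvenc_load(NV_ENCODE_API_FUNCTION_LIST *nvenc)
{
    memset(nvenc, 0, sizeof(*nvenc));
    nvenc->version = NV_ENCODE_API_FUNCTION_LIST_VER;  /* must be set before the call */
    return NvEncodeAPICreateInstance(nvenc);
}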
+/********************************************************************************************************************/ + +#if !defined(__NVCUVID_H__) +#define __NVCUVID_H__ + +#include "cuviddec.h" + +#if defined(__cplusplus) +extern "C" { +#endif /* __cplusplus */ + + +/***********************************************/ +//! +//! High-level helper APIs for video sources +//! +/***********************************************/ + +typedef void *CUvideosource; +typedef void *CUvideoparser; +typedef long long CUvideotimestamp; + + +/************************************************************************/ +//! \enum cudaVideoState +//! Video source state enums +//! Used in cuvidSetVideoSourceState and cuvidGetVideoSourceState APIs +/************************************************************************/ +typedef enum { + cudaVideoState_Error = -1, /**< Error state (invalid source) */ + cudaVideoState_Stopped = 0, /**< Source is stopped (or reached end-of-stream) */ + cudaVideoState_Started = 1 /**< Source is running and delivering data */ +} cudaVideoState; + +/************************************************************************/ +//! \enum cudaAudioCodec +//! Audio compression enums +//! Used in CUAUDIOFORMAT structure +/************************************************************************/ +typedef enum { + cudaAudioCodec_MPEG1=0, /**< MPEG-1 Audio */ + cudaAudioCodec_MPEG2, /**< MPEG-2 Audio */ + cudaAudioCodec_MP3, /**< MPEG-1 Layer III Audio */ + cudaAudioCodec_AC3, /**< Dolby Digital (AC3) Audio */ + cudaAudioCodec_LPCM, /**< PCM Audio */ + cudaAudioCodec_AAC, /**< AAC Audio */ +} cudaAudioCodec; + +/************************************************************************************************/ +//! \ingroup STRUCTS +//! \struct CUVIDEOFORMAT +//! Video format +//! Used in cuvidGetSourceVideoFormat API +/************************************************************************************************/ +typedef struct +{ + cudaVideoCodec codec; /**< OUT: Compression format */ + /** + * OUT: frame rate = numerator / denominator (for example: 30000/1001) + */ + struct { + /**< OUT: frame rate numerator (0 = unspecified or variable frame rate) */ + unsigned int numerator; + /**< OUT: frame rate denominator (0 = unspecified or variable frame rate) */ + unsigned int denominator; + } frame_rate; + unsigned char progressive_sequence; /**< OUT: 0=interlaced, 1=progressive */ + unsigned char bit_depth_luma_minus8; /**< OUT: high bit depth luma. E.g, 2 for 10-bitdepth, 4 for 12-bitdepth */ + unsigned char bit_depth_chroma_minus8; /**< OUT: high bit depth chroma. E.g, 2 for 10-bitdepth, 4 for 12-bitdepth */ + unsigned char min_num_decode_surfaces; /**< OUT: Minimum number of decode surfaces to be allocated for correct + decoding. The client can send this value in ulNumDecodeSurfaces + (in CUVIDDECODECREATEINFO structure). + This guarantees correct functionality and optimal video memory + usage but not necessarily the best performance, which depends on + the design of the overall application. The optimal number of + decode surfaces (in terms of performance and memory utilization) + should be decided by experimentation for each application, but it + cannot go below min_num_decode_surfaces. + If this value is used for ulNumDecodeSurfaces then it must be + returned to parser during sequence callback. 
*/ + unsigned int coded_width; /**< OUT: coded frame width in pixels */ + unsigned int coded_height; /**< OUT: coded frame height in pixels */ + /** + * area of the frame that should be displayed + * typical example: + * coded_width = 1920, coded_height = 1088 + * display_area = { 0,0,1920,1080 } + */ + struct { + int left; /**< OUT: left position of display rect */ + int top; /**< OUT: top position of display rect */ + int right; /**< OUT: right position of display rect */ + int bottom; /**< OUT: bottom position of display rect */ + } display_area; + cudaVideoChromaFormat chroma_format; /**< OUT: Chroma format */ + unsigned int bitrate; /**< OUT: video bitrate (bps, 0=unknown) */ + /** + * OUT: Display Aspect Ratio = x:y (4:3, 16:9, etc) + */ + struct { + int x; + int y; + } display_aspect_ratio; + /** + * Video Signal Description + * Refer section E.2.1 (VUI parameters semantics) of H264 spec file + */ + struct { + unsigned char video_format : 3; /**< OUT: 0-Component, 1-PAL, 2-NTSC, 3-SECAM, 4-MAC, 5-Unspecified */ + unsigned char video_full_range_flag : 1; /**< OUT: indicates the black level and luma and chroma range */ + unsigned char reserved_zero_bits : 4; /**< Reserved bits */ + unsigned char color_primaries; /**< OUT: chromaticity coordinates of source primaries */ + unsigned char transfer_characteristics; /**< OUT: opto-electronic transfer characteristic of the source picture */ + unsigned char matrix_coefficients; /**< OUT: used in deriving luma and chroma signals from RGB primaries */ + } video_signal_description; + unsigned int seqhdr_data_length; /**< OUT: Additional bytes following (CUVIDEOFORMATEX) */ +} CUVIDEOFORMAT; + +/****************************************************************/ +//! \ingroup STRUCTS +//! \struct CUVIDOPERATINGPOINTINFO +//! Operating point information of scalable bitstream +/****************************************************************/ +typedef struct +{ + cudaVideoCodec codec; + union + { + struct + { + unsigned char operating_points_cnt; + unsigned char reserved24_bits[3]; + unsigned short operating_points_idc[32]; + } av1; + unsigned char CodecReserved[1024]; + }; +} CUVIDOPERATINGPOINTINFO; + +/****************************************************************/ +//! \ingroup STRUCTS +//! \struct CUVIDAV1SEQHDR +//! AV1 specific sequence header information +/****************************************************************/ +typedef struct { + unsigned int max_width; + unsigned int max_height; + unsigned char reserved[1016]; +} CUVIDAV1SEQHDR; + +/****************************************************************/ +//! \ingroup STRUCTS +//! \struct CUVIDEOFORMATEX +//! Video format including raw sequence header information +//! Used in cuvidGetSourceVideoFormat API +/****************************************************************/ +typedef struct +{ + CUVIDEOFORMAT format; /**< OUT: CUVIDEOFORMAT structure */ + union { + CUVIDAV1SEQHDR av1; + unsigned char raw_seqhdr_data[1024]; /**< OUT: Sequence header data */ + }; +} CUVIDEOFORMATEX; + +/****************************************************************/ +//! \ingroup STRUCTS +//! \struct CUAUDIOFORMAT +//! Audio formats +//! 
Used in cuvidGetSourceAudioFormat API +/****************************************************************/ +typedef struct +{ + cudaAudioCodec codec; /**< OUT: Compression format */ + unsigned int channels; /**< OUT: number of audio channels */ + unsigned int samplespersec; /**< OUT: sampling frequency */ + unsigned int bitrate; /**< OUT: For uncompressed, can also be used to determine bits per sample */ + unsigned int reserved1; /**< Reserved for future use */ + unsigned int reserved2; /**< Reserved for future use */ +} CUAUDIOFORMAT; + + +/***************************************************************/ +//! \enum CUvideopacketflags +//! Data packet flags +//! Used in CUVIDSOURCEDATAPACKET structure +/***************************************************************/ +typedef enum { + CUVID_PKT_ENDOFSTREAM = 0x01, /**< Set when this is the last packet for this stream */ + CUVID_PKT_TIMESTAMP = 0x02, /**< Timestamp is valid */ + CUVID_PKT_DISCONTINUITY = 0x04, /**< Set when a discontinuity has to be signalled */ + CUVID_PKT_ENDOFPICTURE = 0x08, /**< Set when the packet contains exactly one frame or one field */ + CUVID_PKT_NOTIFY_EOS = 0x10, /**< If this flag is set along with CUVID_PKT_ENDOFSTREAM, an additional (dummy) + display callback will be invoked with null value of CUVIDPARSERDISPINFO which + should be interpreted as end of the stream. */ +} CUvideopacketflags; + +/*****************************************************************************/ +//! \ingroup STRUCTS +//! \struct CUVIDSOURCEDATAPACKET +//! Data Packet +//! Used in cuvidParseVideoData API +//! IN for cuvidParseVideoData +/*****************************************************************************/ +typedef struct _CUVIDSOURCEDATAPACKET +{ + unsigned long flags; /**< IN: Combination of CUVID_PKT_XXX flags */ + unsigned long payload_size; /**< IN: number of bytes in the payload (may be zero if EOS flag is set) */ + const unsigned char *payload; /**< IN: Pointer to packet payload data (may be NULL if EOS flag is set) */ + CUvideotimestamp timestamp; /**< IN: Presentation time stamp (10MHz clock), only valid if + CUVID_PKT_TIMESTAMP flag is set */ +} CUVIDSOURCEDATAPACKET; + +// Callback for packet delivery +typedef int (CUDAAPI *PFNVIDSOURCECALLBACK)(void *, CUVIDSOURCEDATAPACKET *); + +/**************************************************************************************************************************/ +//! \ingroup STRUCTS +//! \struct CUVIDSOURCEPARAMS +//! Describes parameters needed in cuvidCreateVideoSource API +//! NVDECODE API is intended for HW accelerated video decoding so CUvideosource doesn't have audio demuxer for all supported +//! containers. It's recommended to clients to use their own or third party demuxer if audio support is needed. +/**************************************************************************************************************************/ +typedef struct _CUVIDSOURCEPARAMS +{ + unsigned int ulClockRate; /**< IN: Time stamp units in Hz (0=default=10000000Hz) */ + unsigned int bAnnexb : 1; /**< IN: AV1 annexB stream */ + unsigned int uReserved : 31; /**< Reserved for future use - set to zero */ + unsigned int uReserved1[6]; /**< Reserved for future use - set to zero */ + void *pUserData; /**< IN: User private data passed in to the data handlers */ + PFNVIDSOURCECALLBACK pfnVideoDataHandler; /**< IN: Called to deliver video packets */ + PFNVIDSOURCECALLBACK pfnAudioDataHandler; /**< IN: Called to deliver audio packets. 
*/ + void *pvReserved2[8]; /**< Reserved for future use - set to NULL */ +} CUVIDSOURCEPARAMS; + + +/**********************************************/ +//! \ingroup ENUMS +//! \enum CUvideosourceformat_flags +//! CUvideosourceformat_flags +//! Used in cuvidGetSourceVideoFormat API +/**********************************************/ +typedef enum { + CUVID_FMT_EXTFORMATINFO = 0x100 /**< Return extended format structure (CUVIDEOFORMATEX) */ +} CUvideosourceformat_flags; + +#if !defined(__APPLE__) +/***************************************************************************************************************************/ +//! \ingroup FUNCTS +//! \fn CUresult CUDAAPI cuvidCreateVideoSource(CUvideosource *pObj, const char *pszFileName, CUVIDSOURCEPARAMS *pParams) +//! Create CUvideosource object. CUvideosource spawns demultiplexer thread that provides two callbacks: +//! pfnVideoDataHandler() and pfnAudioDataHandler() +//! NVDECODE API is intended for HW accelerated video decoding so CUvideosource doesn't have audio demuxer for all supported +//! containers. It's recommended to clients to use their own or third party demuxer if audio support is needed. +/***************************************************************************************************************************/ +CUresult CUDAAPI cuvidCreateVideoSource(CUvideosource *pObj, const char *pszFileName, CUVIDSOURCEPARAMS *pParams); + +/***************************************************************************************************************************/ +//! \ingroup FUNCTS +//! \fn CUresult CUDAAPI cuvidCreateVideoSourceW(CUvideosource *pObj, const wchar_t *pwszFileName, CUVIDSOURCEPARAMS *pParams) +//! Create video source +/***************************************************************************************************************************/ +CUresult CUDAAPI cuvidCreateVideoSourceW(CUvideosource *pObj, const wchar_t *pwszFileName, CUVIDSOURCEPARAMS *pParams); + +/********************************************************************/ +//! \ingroup FUNCTS +//! \fn CUresult CUDAAPI cuvidDestroyVideoSource(CUvideosource obj) +//! Destroy video source +/********************************************************************/ +CUresult CUDAAPI cuvidDestroyVideoSource(CUvideosource obj); + +/******************************************************************************************/ +//! \ingroup FUNCTS +//! \fn CUresult CUDAAPI cuvidSetVideoSourceState(CUvideosource obj, cudaVideoState state) +//! Set video source state to: +//! cudaVideoState_Started - to signal the source to run and deliver data +//! cudaVideoState_Stopped - to stop the source from delivering the data +//! cudaVideoState_Error - invalid source +/******************************************************************************************/ +CUresult CUDAAPI cuvidSetVideoSourceState(CUvideosource obj, cudaVideoState state); + +/******************************************************************************************/ +//! \ingroup FUNCTS +//! \fn cudaVideoState CUDAAPI cuvidGetVideoSourceState(CUvideosource obj) +//! Get video source state +//! Returns: +//! cudaVideoState_Started - if Source is running and delivering data +//! cudaVideoState_Stopped - if Source is stopped or reached end-of-stream +//! 
cudaVideoState_Error - if Source is in error state +/******************************************************************************************/ +cudaVideoState CUDAAPI cuvidGetVideoSourceState(CUvideosource obj); + +/******************************************************************************************************************/ +//! \ingroup FUNCTS +//! \fn CUresult CUDAAPI cuvidGetSourceVideoFormat(CUvideosource obj, CUVIDEOFORMAT *pvidfmt, unsigned int flags) +//! Gets video source format in pvidfmt, flags is set to combination of CUvideosourceformat_flags as per requirement +/******************************************************************************************************************/ +CUresult CUDAAPI cuvidGetSourceVideoFormat(CUvideosource obj, CUVIDEOFORMAT *pvidfmt, unsigned int flags); + +/**************************************************************************************************************************/ +//! \ingroup FUNCTS +//! \fn CUresult CUDAAPI cuvidGetSourceAudioFormat(CUvideosource obj, CUAUDIOFORMAT *paudfmt, unsigned int flags) +//! Get audio source format +//! NVDECODE API is intended for HW accelerated video decoding so CUvideosource doesn't have audio demuxer for all supported +//! containers. It's recommended to clients to use their own or third party demuxer if audio support is needed. +/**************************************************************************************************************************/ +CUresult CUDAAPI cuvidGetSourceAudioFormat(CUvideosource obj, CUAUDIOFORMAT *paudfmt, unsigned int flags); + +#endif +/**********************************************************************************/ +//! \ingroup STRUCTS +//! \struct CUVIDPARSERDISPINFO +//! Used in cuvidParseVideoData API with PFNVIDDISPLAYCALLBACK pfnDisplayPicture +/**********************************************************************************/ +typedef struct _CUVIDPARSERDISPINFO +{ + int picture_index; /**< OUT: Index of the current picture */ + int progressive_frame; /**< OUT: 1 if progressive frame; 0 otherwise */ + int top_field_first; /**< OUT: 1 if top field is displayed first; 0 otherwise */ + int repeat_first_field; /**< OUT: Number of additional fields (1=ivtc, 2=frame doubling, 4=frame tripling, + -1=unpaired field) */ + CUvideotimestamp timestamp; /**< OUT: Presentation time stamp */ +} CUVIDPARSERDISPINFO; + +/***********************************************************************************************************************/ +//! Parser callbacks +//! The parser will call these synchronously from within cuvidParseVideoData(), whenever there is sequence change or a picture +//! is ready to be decoded and/or displayed. First argument in functions is "void *pUserData" member of structure CUVIDSOURCEPARAMS +//! Return values from these callbacks are interpreted as below. If the callbacks return failure, it will be propagated by +//! cuvidParseVideoData() to the application. +//! Parser picks default operating point as 0 and outputAllLayers flag as 0 if PFNVIDOPPOINTCALLBACK is not set or return value is +//! -1 or invalid operating point. +//! PFNVIDSEQUENCECALLBACK : 0: fail, 1: succeeded, > 1: override dpb size of parser (set by CUVIDPARSERPARAMS::ulMaxNumDecodeSurfaces +//! while creating parser) +//! PFNVIDDECODECALLBACK : 0: fail, >=1: succeeded +//! PFNVIDDISPLAYCALLBACK : 0: fail, >=1: succeeded +//! 
PFNVIDOPPOINTCALLBACK : <0: fail, >=0: succeeded (bit 0-9: OperatingPoint, bit 10-10: outputAllLayers, bit 11-30: reserved) +/***********************************************************************************************************************/ +typedef int (CUDAAPI *PFNVIDSEQUENCECALLBACK)(void *, CUVIDEOFORMAT *); +typedef int (CUDAAPI *PFNVIDDECODECALLBACK)(void *, CUVIDPICPARAMS *); +typedef int (CUDAAPI *PFNVIDDISPLAYCALLBACK)(void *, CUVIDPARSERDISPINFO *); +typedef int (CUDAAPI *PFNVIDOPPOINTCALLBACK)(void *, CUVIDOPERATINGPOINTINFO*); + +/**************************************/ +//! \ingroup STRUCTS +//! \struct CUVIDPARSERPARAMS +//! Used in cuvidCreateVideoParser API +/**************************************/ +typedef struct _CUVIDPARSERPARAMS +{ + cudaVideoCodec CodecType; /**< IN: cudaVideoCodec_XXX */ + unsigned int ulMaxNumDecodeSurfaces; /**< IN: Max # of decode surfaces (parser will cycle through these) */ + unsigned int ulClockRate; /**< IN: Timestamp units in Hz (0=default=10000000Hz) */ + unsigned int ulErrorThreshold; /**< IN: % Error threshold (0-100) for calling pfnDecodePicture (100=always + IN: call pfnDecodePicture even if picture bitstream is fully corrupted) */ + unsigned int ulMaxDisplayDelay; /**< IN: Max display queue delay (improves pipelining of decode with display) + 0=no delay (recommended values: 2..4) */ + unsigned int bAnnexb : 1; /**< IN: AV1 annexB stream */ + unsigned int uReserved : 31; /**< Reserved for future use - set to zero */ + unsigned int uReserved1[4]; /**< IN: Reserved for future use - set to 0 */ + void *pUserData; /**< IN: User data for callbacks */ + PFNVIDSEQUENCECALLBACK pfnSequenceCallback; /**< IN: Called before decoding frames and/or whenever there is a fmt change */ + PFNVIDDECODECALLBACK pfnDecodePicture; /**< IN: Called when a picture is ready to be decoded (decode order) */ + PFNVIDDISPLAYCALLBACK pfnDisplayPicture; /**< IN: Called whenever a picture is ready to be displayed (display order) */ + PFNVIDOPPOINTCALLBACK pfnGetOperatingPoint; /**< IN: Called from AV1 sequence header to get operating point of a AV1 + scalable bitstream */ + void *pvReserved2[6]; /**< Reserved for future use - set to NULL */ + CUVIDEOFORMATEX *pExtVideoInfo; /**< IN: [Optional] sequence header data from system layer */ +} CUVIDPARSERPARAMS; + +/************************************************************************************************/ +//! \ingroup FUNCTS +//! \fn CUresult CUDAAPI cuvidCreateVideoParser(CUvideoparser *pObj, CUVIDPARSERPARAMS *pParams) +//! Create video parser object and initialize +/************************************************************************************************/ +CUresult CUDAAPI cuvidCreateVideoParser(CUvideoparser *pObj, CUVIDPARSERPARAMS *pParams); + +/************************************************************************************************/ +//! \ingroup FUNCTS +//! \fn CUresult CUDAAPI cuvidParseVideoData(CUvideoparser obj, CUVIDSOURCEDATAPACKET *pPacket) +//! Parse the video data from source data packet in pPacket +//! Extracts parameter sets like SPS, PPS, bitstream etc. from pPacket and +//! calls back pfnDecodePicture with CUVIDPICPARAMS data for kicking of HW decoding +//! calls back pfnSequenceCallback with CUVIDEOFORMAT data for initial sequence header or when +//! the decoder encounters a video format change +//! 
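/* [Editorial sketch, not part of the SDK header: creating a parser with the
 * three mandatory callbacks above and feeding it one demuxed packet. The codec
 * choice and the minimal callback bodies are illustrative assumptions; real
 * decode/display callbacks would drive cuvidDecodePicture() and map frames. */
static int CUDAAPI on_sequence(void *user, CUVIDEOFORMAT *fmt)
{ return fmt->min_num_decode_surfaces; } /* >1 overrides the parser's DPB size */
static int CUDAAPI on_decode(void *user, CUVIDPICPARAMS *pic) { return 1; }
static int CUDAAPI on_display(void *user, CUVIDPARSERDISPINFO *disp) { return 1; }

static CUresult make_parser(CUvideoparser *parser, void *user)
{
    CUVIDPARSERPARAMS pp = { 0 };
    pp.CodecType = cudaVideoCodec_H264;   /* example codec */
    pp.ulMaxNumDecodeSurfaces = 1;        /* raised later by on_sequence()'s return */
    pp.ulMaxDisplayDelay = 2;             /* recommended 2..4 for pipelining */
    pp.pUserData = user;
    pp.pfnSequenceCallback = on_sequence;
    pp.pfnDecodePicture = on_decode;
    pp.pfnDisplayPicture = on_display;
    return cuvidCreateVideoParser(parser, &pp);
}

/* One access unit per call; a zero-size packet flagged CUVID_PKT_ENDOFSTREAM
 * flushes the pipeline at end of stream. */
static CUresult feed_packet(CUvideoparser parser, const unsigned char *data,
                            unsigned long size, CUvideotimestamp pts, int eos)
{
    CUVIDSOURCEDATAPACKET pkt = { 0 };
    pkt.payload = data;                   /* may be NULL when eos is set */
    pkt.payload_size = size;
    pkt.timestamp = pts;                  /* in ulClockRate units, 10 MHz default */
    pkt.flags = CUVID_PKT_TIMESTAMP | (eos ? CUVID_PKT_ENDOFSTREAM : 0);
    return cuvidParseVideoData(parser, &pkt);
}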
calls back pfnDisplayPicture with CUVIDPARSERDISPINFO data to display a video frame +/************************************************************************************************/ +CUresult CUDAAPI cuvidParseVideoData(CUvideoparser obj, CUVIDSOURCEDATAPACKET *pPacket); + +/************************************************************************************************/ +//! \ingroup FUNCTS +//! \fn CUresult CUDAAPI cuvidDestroyVideoParser(CUvideoparser obj) +//! Destroy the video parser +/************************************************************************************************/ +CUresult CUDAAPI cuvidDestroyVideoParser(CUvideoparser obj); + +/**********************************************************************************************/ + +#if defined(__cplusplus) +} +#endif /* __cplusplus */ + +#endif // __NVCUVID_H__ + + diff --git a/extern/nvidia/Video_Codec_SDK_11.1.5/LicenseAgreement.pdf b/extern/nvidia/Video_Codec_SDK_11.1.5/LicenseAgreement.pdf new file mode 100644 index 00000000000..0e44fcdba0d Binary files /dev/null and b/extern/nvidia/Video_Codec_SDK_11.1.5/LicenseAgreement.pdf differ diff --git a/extern/nvidia/Video_Codec_SDK_11.1.5/NOTICES.txt b/extern/nvidia/Video_Codec_SDK_11.1.5/NOTICES.txt new file mode 100644 index 00000000000..073bb17cc75 --- /dev/null +++ b/extern/nvidia/Video_Codec_SDK_11.1.5/NOTICES.txt @@ -0,0 +1,167 @@ +This SDK includes portions of FFMPEG, under the following license: + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. 
+ + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. 
If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+  a) Accompany the combined library with a copy of the same work based
+     on the Library, uncombined with any other library facilities,
+     conveyed under the terms of this License.
+
+  b) Give prominent notice with the combined library that part of it
+     is a work based on the Library, and explaining where to find the
+     accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/extern/nvidia/Video_Codec_SDK_11.1.5/Read_Me.pdf b/extern/nvidia/Video_Codec_SDK_11.1.5/Read_Me.pdf
new file mode 100644
index 00000000000..067238322dc
Binary files /dev/null and b/extern/nvidia/Video_Codec_SDK_11.1.5/Read_Me.pdf differ
diff --git a/extern/nvidia/Video_Codec_SDK_11.1.5/Release_Notes.pdf b/extern/nvidia/Video_Codec_SDK_11.1.5/Release_Notes.pdf
new file mode 100644
index 00000000000..791e96861c3
Binary files /dev/null and b/extern/nvidia/Video_Codec_SDK_11.1.5/Release_Notes.pdf differ
diff --git a/extern/nvidia/Video_Codec_SDK_11.1.5/rl_readme.txt b/extern/nvidia/Video_Codec_SDK_11.1.5/rl_readme.txt
new file mode 100644
index 00000000000..b9e65b926c1
--- /dev/null
+++ b/extern/nvidia/Video_Codec_SDK_11.1.5/rl_readme.txt
@@ -0,0 +1,9 @@
+Reality Labs notes.
+
+This is a shortened set of the NVIDIA Video Codec SDK.
+
+The Doc, Lib and Samples folders are removed since we do not use them.
+Only the Interface folder and the readme and license files are kept.
+
+
+
diff --git a/extern/perceptualdiff/test/Aqsis_vase.png b/extern/perceptualdiff/test/Aqsis_vase.png
new file mode 100644
index 00000000000..e60b77fff2e
Binary files /dev/null and b/extern/perceptualdiff/test/Aqsis_vase.png differ
diff --git a/extern/perceptualdiff/test/Aqsis_vase_ref.png b/extern/perceptualdiff/test/Aqsis_vase_ref.png
new file mode 100644
index 00000000000..3637423158f
Binary files /dev/null and b/extern/perceptualdiff/test/Aqsis_vase_ref.png differ
diff --git a/extern/perceptualdiff/test/Bug1102605.tif b/extern/perceptualdiff/test/Bug1102605.tif
new file mode 100644
index 00000000000..f12511e13f4
Binary files /dev/null and b/extern/perceptualdiff/test/Bug1102605.tif differ
diff --git a/extern/perceptualdiff/test/Bug1102605_ref.tif b/extern/perceptualdiff/test/Bug1102605_ref.tif
new file mode 100644
index 00000000000..4739648a770
Binary files /dev/null and b/extern/perceptualdiff/test/Bug1102605_ref.tif differ
diff --git a/extern/perceptualdiff/test/Bug1471457.tif b/extern/perceptualdiff/test/Bug1471457.tif
new file mode 100644
index 00000000000..73716d3d316
Binary files /dev/null and b/extern/perceptualdiff/test/Bug1471457.tif differ
diff --git a/extern/perceptualdiff/test/Bug1471457_ref.tif b/extern/perceptualdiff/test/Bug1471457_ref.tif
new file mode 100644
index 00000000000..30e0062200c
Binary files /dev/null and b/extern/perceptualdiff/test/Bug1471457_ref.tif differ
diff --git a/extern/perceptualdiff/test/cam_mb.tif b/extern/perceptualdiff/test/cam_mb.tif
new file mode 100644
index 00000000000..70646632690
Binary files /dev/null and b/extern/perceptualdiff/test/cam_mb.tif differ
diff --git a/extern/perceptualdiff/test/cam_mb_ref.tif b/extern/perceptualdiff/test/cam_mb_ref.tif
new file mode 100644
index 00000000000..b89562624f9
Binary files /dev/null and b/extern/perceptualdiff/test/cam_mb_ref.tif differ
diff --git a/extern/perceptualdiff/test/fish1.png b/extern/perceptualdiff/test/fish1.png
new file mode 100644
index 00000000000..ec45642dd0a
Binary files /dev/null and b/extern/perceptualdiff/test/fish1.png differ
diff --git a/extern/perceptualdiff/test/fish2.png b/extern/perceptualdiff/test/fish2.png
new file mode 100644
index 00000000000..bfe3b16d04e
Binary files /dev/null and b/extern/perceptualdiff/test/fish2.png differ
diff --git a/extern/perceptualdiff/test/run_tests.sh b/extern/perceptualdiff/test/run_tests.sh
new file mode 100755
index 00000000000..47cce1863c1
--- /dev/null
+++ b/extern/perceptualdiff/test/run_tests.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+# Script to run pdiff against a set of image file pairs, and check that the
+# PASS or FAIL status is as expected.
+
+#------------------------------------------------------------------------------
+# Image files and expected perceptualdiff PASS/FAIL status. Line format is
+# (PASS|FAIL) image1.(tif|png) image2.(tif|png)
+#
+# Edit the following lines to add additional tests.
+function all_tests {
+cat <<EOF
+(test pair list elided in this extract)
+EOF
+}
+
+# (pdiff invocation loop elided in this extract; unexpected results are echoed)
+        ... >&2
+    fi
+done < <(all_tests)
diff --git a/intern/cycles/app/headless_light_client/config.cpp b/intern/cycles/app/headless_light_client/config.cpp
     (USER_EVENTS_PATH.c_str(), po::value<string>(), "path to a file with user events")
     ((MASTER_IMAGE_COLOR_FORMAT + ",i").c_str(), po::value<string>(),
         ("master image color format. Options: " + supported_color_formats).c_str())
+
+    ((MASTER_IMAGE_COMPRESSOR + ",i").c_str(), po::value<string>(),
+    ("master image compressor. 
Options: " + supported_image_compressors).c_str()) + ((DENOISER + ",d").c_str(), po::value(), denoiser_help.c_str()); po::variables_map vm; po::store(po::parse_command_line(argc, argv, options), vm); @@ -191,6 +204,19 @@ if not provided, client uses the resolution net camera object was saved with") } } + if (vm.count(MASTER_IMAGE_COMPRESSOR)) { + string master_image_compressor_str = vm[MASTER_IMAGE_COMPRESSOR].as(); + if(master_image_compressor_str == IMAGE_COMPRESSOR_JPEG) { + master_image_compressor = ClusterSessionParams::MasterImageCompressor::MASTER_IMAGE_COMPRESSOR_JPEG; + } else if(master_image_compressor_str == IMAGE_COMPRESSOR_NVENCODER) { + master_image_compressor = ClusterSessionParams::MasterImageCompressor::MASTER_IMAGE_COMPRESSOR_NVENCODER; + } else { + VLOG(1) << "FATAL. Requested image color format is not supported. Requested: " << master_image_compressor_str << + " Supported: " << supported_color_formats; + return false; + } + } + return true; } diff --git a/intern/cycles/app/headless_light_client/config.h b/intern/cycles/app/headless_light_client/config.h index 829aa2f66a4..e000b0eb708 100644 --- a/intern/cycles/app/headless_light_client/config.h +++ b/intern/cycles/app/headless_light_client/config.h @@ -35,8 +35,11 @@ class Config { static const string SAVE_VIDEO_STREAM_PATH; static const string USER_EVENTS_PATH; static const string MASTER_IMAGE_COLOR_FORMAT; + static const string MASTER_IMAGE_COMPRESSOR; static const string COLOR_FORMAT_LINEAR; static const string COLOR_FORMAT_SRGB; + static const string IMAGE_COMPRESSOR_JPEG; + static const string IMAGE_COMPRESSOR_NVENCODER; public: size_t start_frame_id = 0; @@ -60,6 +63,8 @@ public: ClusterSessionParams::MasterDenoiser master_denoiser; ClusterSessionParams::MasterImageColorFormat master_image_color_format = ClusterSessionParams::MasterImageColorFormat::MASTER_IMAGE_COLOR_FORMAT_SRGB; + ClusterSessionParams::MasterImageCompressor master_image_compressor = + ClusterSessionParams::ClusterSessionParams::MasterImageCompressor::MASTER_IMAGE_COMPRESSOR_JPEG; Config(); ~Config(); diff --git a/intern/cycles/app/headless_light_client/light_cluster_rendering_client.cpp b/intern/cycles/app/headless_light_client/light_cluster_rendering_client.cpp index 19dbcb09a5a..d4d6cafc8b0 100644 --- a/intern/cycles/app/headless_light_client/light_cluster_rendering_client.cpp +++ b/intern/cycles/app/headless_light_client/light_cluster_rendering_client.cpp @@ -8,15 +8,23 @@ #include "camera_provider.h" #include "cluster_session_params.h" -#ifdef WITH_OPTIX -#include "cuda_context.h" +#ifdef WITH_CUDA +#include "cuda_context_provider.h" #endif +#include "image_io_util.h" #include "light_cluster_rendering_client.h" #include "modify_object_message.h" #include "net_camera.h" #include "user_events_provider.h" +#ifdef WITH_CUDA +using cgr_libcluster::CudaContextProvider; +using cgr_libcluster::CUDAContextScope; +#endif +using cgr_libcluster::ImageIOUtil; using cluster_rendering_net_lib::Retry; +using OpenImageIO_v2_4::ImageOutput; +using OpenImageIO_v2_4::TypeDesc; using namespace std::chrono; @@ -25,6 +33,7 @@ namespace headless_light_client { const std::string LightClusterRenderingClient::IMAGE = "Image"; const std::string LightClusterRenderingClient::CAMERA = "Camera"; const std::string LightClusterRenderingClient::JPG = ".jpg"; +const std::string LightClusterRenderingClient::PNG = ".png"; const double LightClusterRenderingClient::WAIT_LAST_FRAME_TIMEOUT_MS = 1500; LightClusterRenderingClient::LightClusterRenderingClient(const Config & config, 
CameraProvider & camera_provider,
@@ -42,16 +51,12 @@ LightClusterRenderingClient::LightClusterRenderingClient(const Config & config,
   image_client_events_handler(IMAGE),
   netlib_camera_client(camera_client_events_handler),
   netlib_image_client(image_client_events_handler)
-#ifdef WITH_OPTIX
   ,encoded_image(INITIAL_SIZE_OF_BUFFER_FOR_ENCODED_IMAGE)
-#endif
 {
 }
 
 LightClusterRenderingClient::~LightClusterRenderingClient() {
-#ifdef WITH_OPTIX
   video_stream_file.close();
-#endif
 }
 
 void LightClusterRenderingClient::run() {
@@ -69,10 +74,12 @@ void LightClusterRenderingClient::run() {
       camera.cam_width = config.frame_width;
       camera.cam_height = config.frame_height;
     }
-    VLOG(3) << "client send camera for frame: " << camera.frame << " cam_width: " << camera.cam_width << " cam_height: " << camera.cam_height;
+    VLOG(3) << "client send camera for frame: " << camera.frame << " cam_width: " << camera.cam_width <<
+      " cam_height: " << camera.cam_height;
     camera.sampleCount = samples_count;
     camera.master_denoiser = master_denoiser;
     camera.master_image_color_format = config.master_image_color_format;
+    camera.master_image_compressor = config.master_image_compressor;
     if(is_first_camera) {
       camera.frame = 0;
       is_first_camera = false;
@@ -127,9 +134,9 @@ void LightClusterRenderingClient::nextCameraDelay(const int frame_id) {
   }
 }
 
-#ifdef WITH_OPTIX
+#ifdef WITH_CUDA
 void LightClusterRenderingClient::encodeImage(NvEncoder * nv_encoder_ptr,
-    const std::vector<uint8_t> & raw_image, AutoEnlargingBuffer & encoded_image_out) {
+    const std::vector<uint8_t> & raw_image, std::vector<uint8_t> & encoded_image_out) {
   auto start = high_resolution_clock::now();
   if(nv_encoder_ptr->encode(raw_image.data(), encoded_image_out)) {
     std::chrono::duration<double> duration_sec = high_resolution_clock::now() - start;
@@ -140,9 +147,13 @@ void LightClusterRenderingClient::encodeImage(NvEncoder * nv_encoder_ptr,
     throw std::runtime_error(message);
   }
 }
+#endif
 
 void LightClusterRenderingClient::writeImageToVideoStreamFile(std::ofstream &video_stream_file,
-    const AutoEnlargingBuffer &encoded_image) {
+    const std::vector<uint8_t> &encoded_image) {
+  if(!video_stream_file.is_open()) {
+    video_stream_file.open(config.save_video_stream_path, std::ios::app | std::ios::binary);
+  }
   if(video_stream_file.is_open()) {
     video_stream_file.write(reinterpret_cast<const char *>(encoded_image.data()), encoded_image.size());
     VLOG(3) << "Wrote encoded image of size: " << encoded_image.size();
@@ -151,7 +162,7 @@ void LightClusterRenderingClient::writeImageToVideoStreamFile(std::ofstream &vid
     throw std::invalid_argument(message);
   }
 }
-#endif
+
 // Simple RGB to RGBA conversion.
 // Headless client gets image in jpeg format from the master and decompresses it into RGB format
 // This method is used for conversion from RGB to RGBA since NVEncoder expects RGBA flavor
@@ -181,6 +192,8 @@ void LightClusterRenderingClient::imageChannelLoop() {
         VLOG(3) << "Image channel received from master buffer size: " << buf_size;
         NetCamera net_camera;
         netlib_image_client.receive(&net_camera, sizeof(NetCamera));
+        // log line below is used by get_metrics.py to calculate stats. If you change it
+        // please make sure get_metrics.py still works correctly. Update if needed.
VLOG(3) << "net camera received for frame: " << net_camera.frame << " " << net_camera.cam_width << " " << net_camera.cam_height; std::unique_ptr > image_buffer_uptr(new std::vector(buf_size)); @@ -190,6 +203,7 @@ void LightClusterRenderingClient::imageChannelLoop() { received_image_from_master = true; ++num_received_frames; outputImageIfRequested(image_buffer_uptr.get(), net_camera); + outputVideoStreamIfRequested(image_buffer_uptr.get(), net_camera); image_cv.notify_one(); } } else { @@ -201,49 +215,88 @@ void LightClusterRenderingClient::imageChannelLoop() { VLOG(3) << "Finished image channel thread."; } -void LightClusterRenderingClient::outputImageIfRequested(std::vector * jpeg_image, const NetCamera & net_camera) { - if(config.save_images_path.length() < 1 && config.save_video_stream_path.length() < 1) { - return; // no output is requested, return; +void LightClusterRenderingClient::outputImageIfRequested(std::vector * image, const NetCamera & net_camera) { + if(config.save_images_path.length() < 1) { + return; // no image output is requested, return; } - if(jpeg_image == nullptr) { - std::string message("FATAL. Cannot output an image, a pointer to the image is null"); - VLOG(3) << message; - throw std::runtime_error(message); - } - uint8_t * flipped_jpeg = nullptr; - unsigned long flipped_jpeg_size = 0; - if(jpeg_tools_ptr == nullptr) { - jpeg_tools_ptr.reset(new JpegTools()); - } - // Images that we receive from the master are upside down, we need to flip images before saving - jpeg_tools_ptr->flipJpegImage(jpeg_image->data(), jpeg_image->size(), &flipped_jpeg, flipped_jpeg_size); - if(config.save_images_path.length() > 0) { - // Saving of noisy images (which are significantly larger due to solt & papper noise) - // to /mnt/graphics_ssd may take about 30 ms what limits read image thread to about 30 fps. - // Since we do not want IO operations to affect averall system performance on hight fps - // we save images in a different thread - boost::asio::post(save_image_thread_pool, std::bind(&LightClusterRenderingClient::saveImage, this, - getImagePath(net_camera.frame, JPG), flipped_jpeg, flipped_jpeg_size)); - } - if(config.save_video_stream_path.length() > 0) { -#ifdef WITH_OPTIX - if(!nv_encoder_ptr) { - // create NVEncoder once we got the first frame with width and height of the image - VLOG(3) << "Creating NvEncoder"; - nv_encoder_ptr.reset(new NvEncoder(NvEncoder::BUFFER_FORMAT_FOR_IMAGES_FROM_MASTER, - CudaContext::createCudaContext(), - net_camera.cam_width, net_camera.cam_height)); - video_stream_file.open(config.save_video_stream_path, std::ios::app | std::ios::binary); + if(net_camera.master_image_compressor == + ClusterSessionParams::MasterImageCompressor::MASTER_IMAGE_COMPRESSOR_JPEG) { + if(image == nullptr) { + std::string message("FATAL. Cannot output an image, a pointer to the image is null"); + VLOG(3) << message; + throw std::runtime_error(message); } + uint8_t * flipped_jpeg = nullptr; + unsigned long flipped_jpeg_size = 0; + if(jpeg_tools_ptr == nullptr) { + jpeg_tools_ptr.reset(new JpegTools()); + } + // Images that we receive from the master are upside down, we need to flip images before saving + jpeg_tools_ptr->flipJpegImage(image->data(), image->size(), &flipped_jpeg, flipped_jpeg_size); + // Saving of noisy images (which are significantly larger due to solt & papper noise) + // to /mnt/graphics_ssd may take about 30 ms what limits read image thread to about 30 fps. 
+ // Since we do not want IO operations to affect averall system performance on hight fps + // we save images in a different thread + boost::asio::post(save_image_thread_pool, std::bind(&LightClusterRenderingClient::saveImageAsIs, this, + getImagePath(net_camera.frame, JPG), flipped_jpeg, flipped_jpeg_size)); + } +#ifdef WITH_CUDA + else if(net_camera.master_image_compressor == + ClusterSessionParams::MasterImageCompressor::MASTER_IMAGE_COMPRESSOR_NVENCODER) { + if(!nv_decoder_ptr) { + VLOG(3) << "Creating nvdecoder"; + CUcontext cuda_context = CudaContextProvider::getPrimaryContext(CUDA_DEVICE_NUM); + const CUDAContextScope scope(cuda_context); + nv_decoder_ptr.reset(new NvDecoder(cuda_context)); + } + nv_decoder_ptr->decode(*image, net_camera.frame, &decoded_image); + boost::asio::post(save_image_thread_pool, std::bind(&LightClusterRenderingClient::saveImageInFormat, this, + PNG, net_camera.frame, decoded_image.data(), net_camera.cam_width, net_camera.cam_height)); + } #endif + else { + VLOG(1) << "Cannot save image, unknown compression: " << net_camera.master_image_compressor; + } +} + +void LightClusterRenderingClient::outputVideoStreamIfRequested(std::vector * image, const NetCamera & net_camera) { + if(config.save_video_stream_path.length() < 1) { + return; // no video stream output is requested, return; + } + if(net_camera.master_image_compressor == + ClusterSessionParams::MasterImageCompressor::MASTER_IMAGE_COMPRESSOR_JPEG) { +#ifdef WITH_CUDA + if(!nv_encoder_ptr) { + VLOG(3) << "Creating NvEncoder"; + CUcontext cuda_context = CudaContextProvider::getPrimaryContext(CUDA_DEVICE_NUM); + const CUDAContextScope scope(cuda_context); + nv_encoder_ptr.reset(new NvEncoder(NV_ENC_BUFFER_FORMAT_ABGR, + cuda_context, + net_camera.cam_width, net_camera.cam_height)); + } std::vector decompressed_image_rgb(net_camera.cam_width * net_camera.cam_height * 3); std::vector decompressed_image_rgba(net_camera.cam_width * net_camera.cam_height * 4); + uint8_t * flipped_jpeg = nullptr; + unsigned long flipped_jpeg_size = 0; + if(jpeg_tools_ptr == nullptr) { + jpeg_tools_ptr.reset(new JpegTools()); + } + // Images that we receive from the master are upside down, we need to flip images before saving + jpeg_tools_ptr->flipJpegImage(image->data(), image->size(), &flipped_jpeg, flipped_jpeg_size); jpeg_tools_ptr->decompressJpeg(flipped_jpeg, flipped_jpeg_size, decompressed_image_rgb); rgbToRgba(decompressed_image_rgb, decompressed_image_rgba); -#ifdef WITH_OPTIX encodeImage(nv_encoder_ptr.get(), decompressed_image_rgba, encoded_image); writeImageToVideoStreamFile(video_stream_file, encoded_image); +#else + throw std::runtime_error("ERROR. 
Client is compiled without CUDA support so has no nvencoder and "
+      "cannot encode received image and save video stream");
#endif
+  } else if(net_camera.master_image_compressor ==
+      ClusterSessionParams::MasterImageCompressor::MASTER_IMAGE_COMPRESSOR_NVENCODER) {
+    // image is already video-encoded by nvencoder, save it as is
+    writeImageToVideoStreamFile(video_stream_file, *image);
+  } else {
+    VLOG(1) << "Cannot save video stream, unknown compressor: " << net_camera.master_image_compressor;
   }
 }
@@ -254,17 +307,24 @@ std::string LightClusterRenderingClient::getImagePath(const int frame_id, const
   return path_ostream.str();
 }
 
-void LightClusterRenderingClient::saveImage(const std::string file_path, unsigned char * jpeg_image, int jpeg_image_size) {
+void LightClusterRenderingClient::saveImageAsIs(const std::string file_path, unsigned char * image, int image_size) {
   VLOG(3) << "Saving image into: " << file_path;
   auto start = high_resolution_clock::now();
   std::ofstream wf(file_path, std::ios::out | std::ios::binary);
-  wf.write(reinterpret_cast<const char *>(jpeg_image), jpeg_image_size);
+  wf.write(reinterpret_cast<const char *>(image), image_size);
   wf.close();
   auto end = high_resolution_clock::now();
   double time_taken = duration_cast<milliseconds>(end - start).count();
   VLOG(3) << "Image saved successfully. Save image time: " << time_taken << " ms";
 }
 
+void LightClusterRenderingClient::saveImageInFormat(std::string format_extension, int frame_id, void * image,
+    int width, int height) {
+  std::string file_path = getImagePath(frame_id, format_extension);
+  std::unique_ptr<ImageOutput> image_output = std::unique_ptr<ImageOutput>(ImageOutput::create(file_path));
+  ImageIOUtil::saveFrame(file_path, TypeDesc::UCHAR, image_output.get(), image, width, height);
+}
+
 bool LightClusterRenderingClient::connectToMaster() {
   bool connected = false;
diff --git a/intern/cycles/app/headless_light_client/light_cluster_rendering_client.h b/intern/cycles/app/headless_light_client/light_cluster_rendering_client.h
index c76b27ae08d..5a33bf7466c 100644
--- a/intern/cycles/app/headless_light_client/light_cluster_rendering_client.h
+++ b/intern/cycles/app/headless_light_client/light_cluster_rendering_client.h
@@ -11,8 +11,9 @@
 #include "jpeg_tools.h"
 
-#ifdef WITH_OPTIX
+#ifdef WITH_CUDA
 #include "nv_encoder.h"
+#include "nv_decoder.h"
 #endif
 
 #include "camera_provider.h"
@@ -20,14 +21,15 @@
 #include "netclient.h"
 #include "netlib_event_handler.h"
 #include "rpc_blender_protocol.h"
+#include "./utils/vector_types.h" // for uchar3
 
 namespace headless_light_client {
 
 using cgr_libcluster::ClusterSessionParams;
 using cgr_libcluster::RpcBlenderProtocol;
-#ifdef WITH_OPTIX
+#ifdef WITH_CUDA
+using cgr_libcluster::NvDecoder;
 using cgr_libcluster::NvEncoder;
-using cgr_libcluster::AutoEnlargingBuffer;
 #endif
 
 class CameraProvider;
@@ -51,6 +53,8 @@ private:
   static const std::string IMAGE;
   static const std::string CAMERA;
   static const std::string JPG;
+  static const std::string PNG;
+  static const size_t CUDA_DEVICE_NUM = 0;
   static const size_t MAX_NUM_RETRIES = 5;
   static const size_t RETRY_INTERVAL_MS = 2000;
   static const size_t INITIAL_SIZE_OF_BUFFER_FOR_ENCODED_IMAGE = 50000;
@@ -81,11 +85,13 @@ private:
   std::condition_variable image_cv;
   std::atomic<bool> received_image_from_master = false;
   std::unique_ptr<JpegTools> jpeg_tools_ptr;
-#ifdef WITH_OPTIX
-  std::unique_ptr<NvEncoder> nv_encoder_ptr;
-  AutoEnlargingBuffer encoded_image;
+  #ifdef WITH_CUDA
+  std::unique_ptr<NvEncoder> nv_encoder_ptr;
+  std::unique_ptr<NvDecoder> nv_decoder_ptr;
+  #endif
+  std::vector<uint8_t> encoded_image;
+  std::vector<uchar3> decoded_image;
   std::ofstream video_stream_file;
-#endif
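A minimal illustrative sketch, not part of the patch above: the rgbToRgba helper called in light_cluster_rendering_client.cpp is not shown in this diff, but the conversion its comments describe could look like this, assuming tightly packed buffers and an opaque alpha channel.

#include <cstdint>
#include <vector>

// Hypothetical sketch of rgbToRgba: expand packed RGB into RGBA with alpha = 255,
// since the NVENC input buffer expects a 4-channel image.
void rgbToRgba(const std::vector<uint8_t> & rgb, std::vector<uint8_t> & rgba) {
  const size_t num_pixels = rgb.size() / 3;
  rgba.resize(num_pixels * 4);
  for (size_t i = 0; i < num_pixels; ++i) {
    rgba[i * 4 + 0] = rgb[i * 3 + 0]; // R
    rgba[i * 4 + 1] = rgb[i * 3 + 1]; // G
    rgba[i * 4 + 2] = rgb[i * 3 + 2]; // B
    rgba[i * 4 + 3] = 255;            // opaque alpha
  }
}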
boost::asio::thread_pool save_image_thread_pool = boost::asio::thread_pool(4);
@@ -99,13 +105,16 @@ private:
   void imageChannelLoop();
   void nextCameraDelay(const int frame_id);
   std::string getImagePath(const int frame_id, const std::string & file_extension);
-  void outputImageIfRequested(uint8_t * jpeg_image, int jpeg_image_size, const NetCamera & net_camera);
-  void outputImageIfRequested(std::vector<uint8_t> * jpeg_image, const NetCamera & net_camera);
-  void saveImage(const std::string file_path, unsigned char * jpeg_image, int jpeg_image_size);
-#ifdef WITH_OPTIX
-  void writeImageToVideoStreamFile(std::ofstream &video_stream_file, const AutoEnlargingBuffer &encoded_image);
+  void outputVideoStreamIfRequested(std::vector<uint8_t> * image, const NetCamera & net_camera);
+  void outputImageIfRequested(std::vector<uint8_t> * image, const NetCamera & net_camera);
+  void saveImageAsIs(const std::string file_path, unsigned char * image, int image_size);
+  // Saves image in a format specified as a file extension
+  void saveImageInFormat(std::string format_extension, int frame_id, void * image, int width, int height);
+  void writeImageToVideoStreamFile(std::ofstream &video_stream_file,
+      const std::vector<uint8_t> &encoded_image);
+#ifdef WITH_CUDA
   void encodeImage(NvEncoder * nv_encoder_ptr, const std::vector<uint8_t> & raw_image,
-      AutoEnlargingBuffer & encoded_image_out);
+      std::vector<uint8_t> & encoded_image_out);
 #endif
 };
diff --git a/intern/cycles/app/headless_light_client/main.cpp b/intern/cycles/app/headless_light_client/main.cpp
index bc08a65c4cd..6faa58c5039 100644
--- a/intern/cycles/app/headless_light_client/main.cpp
+++ b/intern/cycles/app/headless_light_client/main.cpp
@@ -29,8 +29,8 @@ using headless_light_client::LightClusterRenderingClient;
 using headless_light_client::UserEventsProvider;
 
 static void initLogging(const char* argv0) {
-  setenv("GLOG_v", "3", 1);
-  setenv("GLOG_vmodule", "session_network*=3", 1);
+  //setenv("GLOG_v", "3", 1);
+  //setenv("GLOG_vmodule", "session_network*=3", 1);
   google::InitGoogleLogging(argv0);
   GFLAGS_NAMESPACE::SetCommandLineOption("logtostderr", "1");
 }
diff --git a/intern/cycles/blender/addon/ui.py b/intern/cycles/blender/addon/ui.py
index a7bc3b8ce25..df5fd71f32a 100644
--- a/intern/cycles/blender/addon/ui.py
+++ b/intern/cycles/blender/addon/ui.py
@@ -163,6 +163,7 @@ class CYCLES_RENDER_PT_render_session_mode(CyclesButtonsPanel, Panel):
         netsub.enabled = net.enabled and render.render_session_mode == 'MASTER'
         netsub.prop(render, "num_servers")
         netport.prop(render, "master_image_color_format")
+        netport.prop(render, "master_image_compressor")
         #Temp turned off. TODO: [pmishchuk] enable when ready
         # import _cycles
         # if _cycles.with_webrtc:
@@ -833,7 +834,7 @@ class CYCLES_RENDER_PT_performance_acceleration_structure(CyclesButtonsPanel, Pa
     @classmethod
     def poll(cls, context):
-        return not use_optix(context)
+        return not use_optix(context)
 
     def draw(self, context):
         import _cycles
diff --git a/intern/cycles/blender/sync.cpp b/intern/cycles/blender/sync.cpp
index 30abe945a54..4f30ac82c18 100644
--- a/intern/cycles/blender/sync.cpp
+++ b/intern/cycles/blender/sync.cpp
@@ -871,6 +871,13 @@ SessionParams BlenderSync::get_session_params(BL::RenderEngine &b_engine,
     // use default color format.
master_image_color_format_num = cgr_libcluster::ClusterSessionParams::DEFAULT_MASTER_IMAGE_COLOR_FORMAT;
   }
 
+  int master_image_compressor_num = r.master_image_compressor();
+  if (master_image_compressor_num <= cgr_libcluster::ClusterSessionParams::MasterImageCompressor::MASTER_IMAGE_COMPRESSOR_NONE ||
+      master_image_compressor_num >= cgr_libcluster::ClusterSessionParams::MasterImageCompressor::MASTER_IMAGE_COMPRESSOR_END) {
+    // Blend file has no saved value for image compressor (it was saved before we added this property)
+    // use the default image compressor.
+    master_image_compressor_num = cgr_libcluster::ClusterSessionParams::DEFAULT_MASTER_IMAGE_COMPRESSOR;
+  }
 
   params.cluster_session_params = cgr_libcluster::ClusterSessionParams(
     static_cast<cgr_libcluster::ClusterSessionParams::SessionMode>(r.render_session_mode()),
@@ -878,7 +885,8 @@ SessionParams BlenderSync::get_session_params(BL::RenderEngine &b_engine,
     static_cast<cgr_libcluster::ClusterSessionParams::MasterDenoiser>(r.master_denoiser()),
     r.save_denoise_io(), r.save_streamed_image(), save_every_n_images, r.save_cameras(), r.filepath(),
-    static_cast<cgr_libcluster::ClusterSessionParams::MasterImageColorFormat>(master_image_color_format_num));
+    static_cast<cgr_libcluster::ClusterSessionParams::MasterImageColorFormat>(master_image_color_format_num),
+    static_cast<cgr_libcluster::ClusterSessionParams::MasterImageCompressor>(master_image_compressor_num));
 
   params.cluster_session_params.fps = r.fps();
   cgr_libcluster::ModifyObjectParams & modify_object_params = params.cluster_session_params.modify_object_params;
diff --git a/intern/cycles/cluster_rendering/CMakeLists.txt b/intern/cycles/cluster_rendering/CMakeLists.txt
index ced54595952..5736e383975 100644
--- a/intern/cycles/cluster_rendering/CMakeLists.txt
+++ b/intern/cycles/cluster_rendering/CMakeLists.txt
@@ -1,5 +1,5 @@
+if(WITH_CYCLES_DEVICE_CUDA)
+  add_subdirectory(libcluster_cuda_kernels)
+endif()
 add_subdirectory(libcluster)
 add_subdirectory(libnetwork)
-if(WITH_WEBRTC)
-  add_subdirectory(libstream)
-endif(WITH_WEBRTC)
diff --git a/intern/cycles/cluster_rendering/libcluster/CMakeLists.txt b/intern/cycles/cluster_rendering/libcluster/CMakeLists.txt
index 23f1fff25d2..7b07a96aab8 100644
--- a/intern/cycles/cluster_rendering/libcluster/CMakeLists.txt
+++ b/intern/cycles/cluster_rendering/libcluster/CMakeLists.txt
@@ -1,7 +1,7 @@
 add_subdirectory(test)
 #add_subdirectory(../../../../extern/flatbuffers)
 # ${CMAKE_CURRENT_BINARY_DIR}/flatbuffers-build
-# EXCLUDE_FROM_ALL)
+# EXCLUDE_FROM_ALL)
 #SET(OIDN_PATH ${PROJECT_SOURCE_DIR}/../lib)
 #if(APPLE)
 #SET(OIDN_FOLDER ${OIDN_PATH}/oidn.macos)
@@ -9,7 +9,9 @@ add_subdirectory(test)
 #SET(OIDN_FOLDER ${OIDN_PATH}/oidn.linux)
 #endif()
 
-find_package(Boost 1.48 COMPONENTS serialization REQUIRED)
+# From William: find_package(Boost 1.48 COMPONENTS serialization REQUIRED)
+# Does not work on Windows at the moment, so turning this REQUIRED off
+find_package(Boost 1.48 COMPONENTS serialization)
 find_package(TurboJPEG REQUIRED)
 # Uncomment instead of the above if using VCPKG
 # find_package(libjpeg-turbo REQUIRED PATHS)
@@ -54,34 +56,52 @@ set(SRC
   net_client.cpp
   rpc_blender_protocol.cpp
   image_io_util.cpp
+  compression/turbojpeg_compressor.cpp
   utils/timer.cpp
   utils/vector_types.cpp
+  utils/image.cpp
   denoising/denoise_buffer.cpp
   denoising/denoising_context.cpp
   denoising/master_denoiser.cpp
   denoising/master_oidn_denoiser.cpp
 )
 
+if(WITH_CYCLES_DEVICE_OPTIX)
+  list(APPEND SRC denoising/master_optix_denoiser.cpp)
+endif()
+
 set(FLAT_BUFFER_FILES
   net_camera.fbs
 )
 
 function(FLATBUFFERS_GENERATE_C_HEADERS Name)
   if(NOT FLATBUFFERS_FLATC_EXECUTABLE)
     set(FLATBUFFERS_FLATC_EXECUTABLE $<TARGET_FILE:flatc>)
-    if(NOT WIN32)
-      set(FLATBUFFERS_FLATC_EXECUTABLE ${CMAKE_BINARY_DIR}/bin/flatc)
-      message(WARNING "Using flatc binary FLATBUFFERS_FLATC_EXECUTABLE")
-    else()
-      # There 
seems to be a bug on Windows that when the CMAKE_BUILD_TYPE is undefined the - # CMAKE_BUILD_TYPE_INIT is set to "Debug" when it should be "Release" - message("Foo is located at:${CONFIG}") - # if($) - set(FLATBUFFERS_FLATC_EXECUTABLE $) # ${CMAKE_BINARY_DIR}/bin/Debug/flatc.exe) - #else() - # set(FLATBUFFERS_FLATC_EXECUTABLE ${CMAKE_BINARY_DIR}/bin/Release/flatc.exe) - #endif() - message(WARNING "Using Windows flatc binary:${FLATBUFFERS_FLATC_EXECUTABLE} for build type:${CMAKE_BUILD_TYPE} ${CMAKE_BUILD_TYPE_INIT}") - endif() +# if(NOT WIN32) +# #if(APPLE) +# # There seems to be a bug on Windows that when the CMAKE_BUILD_TYPE is undefined the +# # CMAKE_BUILD_TYPE_INIT is set to "Debug" when it should be "Release" +# message("Foo is located at:${CONFIG}") +# # if($) +# set(FLATBUFFERS_FLATC_EXECUTABLE $) # ${CMAKE_BINARY_DIR}/bin/Debug/flatc) +# #else() +# # set(FLATBUFFERS_FLATC_EXECUTABLE ${CMAKE_BINARY_DIR}/bin/Release/flatc) +# #endif() +# message(WARNING "Using MacOS flatc binary:${FLATBUFFERS_FLATC_EXECUTABLE}") +# #else() +# set(FLATBUFFERS_FLATC_EXECUTABLE ${CMAKE_BINARY_DIR}/bin/flatc) +# message(WARNING "Using flatc binary FLATBUFFERS_FLATC_EXECUTABLE") +# #endif() +# else() +# # There seems to be a bug on Windows that when the CMAKE_BUILD_TYPE is undefined the +# # CMAKE_BUILD_TYPE_INIT is set to "Debug" when it should be "Release" +# message("Foo is located at:${CONFIG}") +# # if($) +# set(FLATBUFFERS_FLATC_EXECUTABLE $) # ${CMAKE_BINARY_DIR}/bin/Debug/flatc.exe) +# #else() +# # set(FLATBUFFERS_FLATC_EXECUTABLE ${CMAKE_BINARY_DIR}/bin/Release/flatc.exe) +# #endif() +# message(WARNING "Using Windows flatc binary:${FLATBUFFERS_FLATC_EXECUTABLE} for build type:${CMAKE_BUILD_TYPE} ${CMAKE_BUILD_TYPE_INIT}") +# endif() endif() set(FLATC_OUTPUTS) foreach(FILE ${ARGN}) @@ -93,18 +113,18 @@ function(FLATBUFFERS_GENERATE_C_HEADERS Name) set(INPUT_FBS_PATH "${CMAKE_CURRENT_SOURCE_DIR}/${FILE}") cmake_path(NATIVE_PATH INPUT_FBS_PATH NATIVE_INPUT_FBS_PATH) set(OUTPUT_DIR_PATH "${CMAKE_CURRENT_BINARY_DIR}/") - cmake_path(NATIVE_PATH OUTPUT_DIR_PATH NATIVE_OUTPUT_DIR_PATH) + cmake_path(NATIVE_PATH OUTPUT_DIR_PATH NATIVE_OUTPUT_DIR_PATH) add_custom_command(OUTPUT ${FLATC_OUTPUT} COMMAND ${FLATBUFFERS_FLATC_EXECUTABLE} ARGS -c -o "${NATIVE_OUTPUT_DIR_PATH}" ${INPUT_FBS_PATH} DEPENDS ${DEPS} - COMMENT "Building C++ header for ${FILE}" + COMMENT "Building C++ header for ${FILE}" COMMENT "Running ${FLATBUFFERS_FLATC_EXECUTABLE} -c -o \"${NATIVE_OUTPUT_DIR_PATH}\" ${NATIVE_INPUT_FBS_PATH}" WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) endforeach() set(${Name}_OUTPUTS ${FLATC_OUTPUTS} PARENT_SCOPE) endfunction() - + #FLATBUFFERS_GENERATE_C_HEADERS(FLAT_BUFFER_FILES) # compile_flatbuffers_schema_to_cpp(net_camera.fbs) flatbuffers_generate_c_headers(cycles_libcluster_flat_headers net_camera.fbs) @@ -140,6 +160,7 @@ set(SRC_HEADERS utils/logging.h utils/timer.h utils/vector_types.h + utils/image.h denoising/denoise_buffer.h denoising/denoising_context.h denoising/master_denoiser.h @@ -152,22 +173,31 @@ set(LIB ${Boost_LIBRARIES} ${TurboJPEG_LIBRARIES} flatbuffers +# rt ) -if(WITH_CYCLES_DEVICE_CUDA OR WITH_CYCLES_DEVICE_OPTIX) - message(STATUS "Using CUDA adding nv_encoder") - list(APPEND INC ${OPTIX_INCLUDE_DIRS}) - list(APPEND SRC denoising/master_optix_denoiser.cpp compression/nv_encoder.cpp) - list(APPEND SRC_HEADERS denoising/master_optix_denoiser.h compression/nv_encoder.h) +if(WITH_CYCLES_DEVICE_OPTIX) + list(APPEND INC ${OPTIX_INCLUDE_DIRS}) +endif() + +if(WITH_CYCLES_DEVICE_CUDA AND UNIX) + 
message(STATUS "Building with CUDA, adding nv_encoder to the libcluster") + find_library(NVENCODEAPI_LIB nvidia-encode) + find_library(NVCUVID_LIB nvcuvid HINTS "/lib64/") # the same folder on dev laptop and docker image + list(APPEND SRC compression/nv_encoder.cpp) + list(APPEND SRC_HEADERS compression/nv_encoder.h) + list(APPEND SRC compression/nv_decoder.cpp) + list(APPEND SRC_HEADERS compression/nv_decoder.h) + list(APPEND LIB cycles_libcluster_cuda_kernels) + list(APPEND LIB ${NVENCODEAPI_LIB}) + list(APPEND LIB ${NVCUVID_LIB}) if(WITH_CUDA_DYNLOAD) - list(APPEND LIB - extern_cuew - ) + list(APPEND LIB extern_cuew) else() - list(APPEND LIB - ${CUDA_CUDA_LIBRARY} - ) + list(APPEND LIB ${CUDA_CUDA_LIBRARY}) endif() +else() + message(STATUS "No CUDA or we are not on Linux so building libcluster without nv_encoder") endif() #if(APPLE) diff --git a/intern/cycles/cluster_rendering/libcluster/cluster_session_params.h b/intern/cycles/cluster_rendering/libcluster/cluster_session_params.h index 143ceaa5a0f..fbd1f3c81bc 100644 --- a/intern/cycles/cluster_rendering/libcluster/cluster_session_params.h +++ b/intern/cycles/cluster_rendering/libcluster/cluster_session_params.h @@ -61,12 +61,24 @@ public: // END must go last MASTER_IMAGE_COLOR_FORMAT_END, }; + + enum MasterImageCompressor{ + MASTER_IMAGE_COMPRESSOR_NONE = 0, + // Add new options here between _NONE and _END + // List of formats must be in sync with list in DNA_scene_types.h + MASTER_IMAGE_COMPRESSOR_JPEG = 1, + MASTER_IMAGE_COMPRESSOR_NVENCODER = 2, + // END must go last + MASTER_IMAGE_COMPRESSOR_END, + }; + static const MasterImageColorFormat DEFAULT_MASTER_IMAGE_COLOR_FORMAT = MASTER_IMAGE_COLOR_FORMAT_LINEAR; + static const MasterImageCompressor DEFAULT_MASTER_IMAGE_COMPRESSOR = MASTER_IMAGE_COMPRESSOR_JPEG; ClusterSessionParams(SessionMode session_mode, std::string master_address, int master_port, int num_servers, int master_compression_quality, MasterDenoiser master_denoiser, bool save_denoise_io, bool save_streamed_image, int save_every_n_images, bool save_cameras, const std::string & output_folder_path, - MasterImageColorFormat master_image_color_format) : + MasterImageColorFormat master_image_color_format, MasterImageCompressor master_image_compressor) : session_mode(session_mode), master_address(master_address), master_port(master_port), @@ -74,6 +86,7 @@ public: master_compression_quality(master_compression_quality), master_denoiser(master_denoiser), master_image_color_format(master_image_color_format), + master_image_compressor(master_image_compressor), save_denoise_io(save_denoise_io), save_streamed_image(save_streamed_image), save_every_n_images(save_every_n_images), @@ -89,6 +102,7 @@ public: master_compression_quality = 85; master_denoiser = MASTER_DENOISER_NONE; master_image_color_format = MASTER_IMAGE_COLOR_FORMAT_LINEAR; + master_image_compressor = MASTER_IMAGE_COMPRESSOR_JPEG; } bool modified(const ClusterSessionParams ¶ms) const @@ -103,6 +117,7 @@ public: save_every_n_images == params.save_every_n_images && save_cameras == params.save_cameras && master_image_color_format == params.master_image_color_format && + master_image_compressor == params.master_image_compressor && output_folder_path == params.output_folder_path); // modified method should not compare modify_object_params. // When modified method returns true a session is restarted. 
@@ -116,6 +131,7 @@ public:
   int master_compression_quality;
   MasterDenoiser master_denoiser;
   MasterImageColorFormat master_image_color_format;
+  MasterImageCompressor master_image_compressor;
   bool save_denoise_io = false;
   bool save_streamed_image = false;
   // When save_every_n_images is 10, we only save 0th, 10th, 20th, 30th images
diff --git a/intern/cycles/cluster_rendering/libcluster/compression/nv_decoder.cpp b/intern/cycles/cluster_rendering/libcluster/compression/nv_decoder.cpp
new file mode 100644
index 00000000000..56af6219728
--- /dev/null
+++ b/intern/cycles/cluster_rendering/libcluster/compression/nv_decoder.cpp
@@ -0,0 +1,262 @@
+
+#include <cmath>
+#include <cstring>
+#include <stdexcept>
+#include <string>
+
+#include "nv_decoder.h"
+#include "../cuda_context_provider.h"
+#include "../utils/cuda_utils.h"
+#include "../utils/image.h"
+
+namespace cgr_libcluster {
+
+static const char * getVideoCodecString(cudaVideoCodec video_codec) {
+  static struct {
+    cudaVideoCodec codec;
+    const char *name;
+  } CodecToName [] = {
+    { cudaVideoCodec_MPEG1,     "MPEG-1"       },
+    { cudaVideoCodec_MPEG2,     "MPEG-2"       },
+    { cudaVideoCodec_MPEG4,     "MPEG-4 (ASP)" },
+    { cudaVideoCodec_VC1,       "VC-1/WMV"     },
+    { cudaVideoCodec_H264,      "AVC/H.264"    },
+    { cudaVideoCodec_JPEG,      "M-JPEG"       },
+    { cudaVideoCodec_H264_SVC,  "H.264/SVC"    },
+    { cudaVideoCodec_H264_MVC,  "H.264/MVC"    },
+    { cudaVideoCodec_HEVC,      "H.265/HEVC"   },
+    { cudaVideoCodec_VP8,       "VP8"          },
+    { cudaVideoCodec_VP9,       "VP9"          },
+    { cudaVideoCodec_AV1,       "AV1"          },
+    { cudaVideoCodec_NumCodecs, "Invalid"      },
+    { cudaVideoCodec_YUV420,    "YUV 4:2:0"    },
+    { cudaVideoCodec_YV12,      "YV12 4:2:0"   },
+    { cudaVideoCodec_NV12,      "NV12 4:2:0"   },
+    { cudaVideoCodec_YUYV,      "YUYV 4:2:2"   },
+    { cudaVideoCodec_UYVY,      "UYVY 4:2:2"   },
+  };
+  if (video_codec >= 0 && video_codec <= cudaVideoCodec_NumCodecs) {
+    return CodecToName[video_codec].name;
+  }
+  for (int i = cudaVideoCodec_NumCodecs + 1; i < sizeof(CodecToName) / sizeof(CodecToName[0]); i++) {
+    if (video_codec == CodecToName[i].codec) {
+      return CodecToName[i].name;
+    }
+  }
+  return "Unknown";
+}
+
+static const char * getVideoChromaFormatString(cudaVideoChromaFormat chroma_format) {
+  static struct {
+    cudaVideoChromaFormat chroma_format;
+    const char *name;
+  } ChromaFormatToName[] = {
+    { cudaVideoChromaFormat_Monochrome, "YUV 400 (Monochrome)" },
+    { cudaVideoChromaFormat_420,        "YUV 420"              },
+    { cudaVideoChromaFormat_422,        "YUV 422"              },
+    { cudaVideoChromaFormat_444,        "YUV 444"              },
+  };
+  if (chroma_format >= 0 && chroma_format < sizeof(ChromaFormatToName) / sizeof(ChromaFormatToName[0])) {
+    return ChromaFormatToName[chroma_format].name;
+  }
+  return "Unknown";
+}
+
+static float getChromaHeightFactor(cudaVideoSurfaceFormat surface_format) {
+  float factor = 0.5;
+  switch (surface_format) {
+  case cudaVideoSurfaceFormat_NV12:
+  case cudaVideoSurfaceFormat_P016:
+    factor = 0.5;
+    break;
+  case cudaVideoSurfaceFormat_YUV444:
+  case cudaVideoSurfaceFormat_YUV444_16Bit:
+    factor = 1.0;
+    break;
+  }
+  return factor;
+}
+
+static int getChromaPlaneCount(cudaVideoSurfaceFormat surface_format) {
+  int numPlane = 1;
+  switch (surface_format) {
+  case cudaVideoSurfaceFormat_NV12:
+  case cudaVideoSurfaceFormat_P016:
+    numPlane = 1;
+    break;
+  case cudaVideoSurfaceFormat_YUV444:
+  case cudaVideoSurfaceFormat_YUV444_16Bit:
+    numPlane = 2;
+    break;
+  }
+  return numPlane;
+}
+
+NvDecoder::NvDecoder(CUcontext cuda_context):
+    m_cuda_context(cuda_context) {
+  LOG(INFO) << "NvDecoder constructor. 
Creating video parser"; + CUVIDPARSERPARAMS videoParserParameters = {}; + videoParserParameters.CodecType = cudaVideoCodec_H264; + videoParserParameters.ulMaxNumDecodeSurfaces = 1; + videoParserParameters.ulClockRate = 1000; + constexpr int low_latency_display_delay = 0; + videoParserParameters.ulMaxDisplayDelay = low_latency_display_delay; + videoParserParameters.pUserData = this; + videoParserParameters.pfnSequenceCallback = HandleVideoSequenceCallback; + videoParserParameters.pfnDecodePicture = HandlePictureDecodeCallback; + videoParserParameters.pfnDisplayPicture = HandlePictureDisplayCallback; + CUDA_API_CALL(cuvidCreateVideoParser(&m_parser, &videoParserParameters), THROW_IF_ERROR); + LOG(INFO) << "Created video parser"; +} + +NvDecoder::~NvDecoder() { + LOG(INFO) << "NvDecoder destructor"; + if (m_parser) { + LOG(INFO) << "Destroying video parser"; + CUDA_API_CALL(cuvidDestroyVideoParser(m_parser), DO_NOT_THROW); + } + if (m_decoder) { + LOG(INFO) << "Destroying video decoder"; + CUDA_API_CALL(cuvidDestroyDecoder(m_decoder), DO_NOT_THROW); + } + LOG(INFO) << "NvDecoder released resources and is destroyed"; +} + +// Return values: +// 0 : fail +// 1 : succeeded, but driver should not override CUVIDPARSERPARAMS::ulMaxNumDecodeSurfaces +// >1: succeeded, and driver should override CUVIDPARSERPARAMS::ulMaxNumDecodeSurfaces with this return value +int NvDecoder::HandleVideoSequence(CUVIDEOFORMAT *vdeo_format){ + LOG(INFO) << "HandleVideoSequence callback. Creating nvdecoder"; + LOG(INFO) << "Video Input Information:"; + LOG(INFO) << "\tCodec : " << getVideoCodecString(vdeo_format->codec); + LOG(INFO) << "\tSequence : " << (vdeo_format->progressive_sequence ? "Progressive" : "Interlaced"); + LOG(INFO) << "\tCoded size : [" << vdeo_format->coded_width << ", " << vdeo_format->coded_height << "]"; + LOG(INFO) << "\tDisplay area : [" << vdeo_format->display_area.left << ", " << vdeo_format->display_area.top << ", " + << vdeo_format->display_area.right << ", " << vdeo_format->display_area.bottom << "]"; + LOG(INFO) << "\tChroma : " << getVideoChromaFormatString(vdeo_format->chroma_format); + LOG(INFO) << "\tBit depth : " << vdeo_format->bit_depth_luma_minus8 + 8; + + CUVIDDECODECREATEINFO video_decode_create_info = { 0 }; + video_decode_create_info.CodecType = vdeo_format->codec; + video_decode_create_info.ChromaFormat = vdeo_format->chroma_format; + video_decode_create_info.OutputFormat = cudaVideoSurfaceFormat_NV12; + video_decode_create_info.bitDepthMinus8 = vdeo_format->bit_depth_luma_minus8; + video_decode_create_info.DeinterlaceMode = cudaVideoDeinterlaceMode_Weave; + video_decode_create_info.ulNumOutputSurfaces = 2; + video_decode_create_info.ulCreationFlags = cudaVideoCreate_PreferCUVID; + // This is how nvidia recommends calculating ulNumDecodeSurfaces here: + // https://developer.nvidia.com/blog/optimizing-video-memory-usage-with-the-nvdecode-api-and-nvidia-video-codec-sdk/ + video_decode_create_info.ulNumDecodeSurfaces = vdeo_format->min_num_decode_surfaces + 3; + video_decode_create_info.ulWidth = vdeo_format->coded_width; + video_decode_create_info.ulHeight = vdeo_format->coded_height; + video_decode_create_info.ulMaxWidth = video_decode_create_info.ulWidth; + video_decode_create_info.ulMaxHeight = video_decode_create_info.ulHeight; + video_decode_create_info.ulTargetWidth = video_decode_create_info.ulWidth; + video_decode_create_info.ulTargetHeight = video_decode_create_info.ulHeight; + + m_image_width_in_pixels = vdeo_format->display_area.right - vdeo_format->display_area.left; 
+  // NV12/P016 output format width is 2 byte aligned because of U and V interleave
+  if (m_output_format == cudaVideoSurfaceFormat_NV12 ||
+      m_output_format == cudaVideoSurfaceFormat_P016) {
+    m_image_width_in_pixels = (m_image_width_in_pixels + 1) & ~1;
+  }
+  m_luma_height = video_format->display_area.bottom - video_format->display_area.top;
+  m_bytes_per_pixel = video_decode_create_info.bitDepthMinus8 > 0 ? 2 : 1;
+  m_chroma_height = (int)(std::ceil(m_luma_height * getChromaHeightFactor(video_decode_create_info.OutputFormat)));
+  m_num_chroma_planes = getChromaPlaneCount(video_decode_create_info.OutputFormat);
+  m_surface_height = video_decode_create_info.ulTargetHeight;
+  const int size_of_decoded_image_yuv_in_bytes = m_image_width_in_pixels *
+    (m_luma_height + (m_chroma_height * m_num_chroma_planes)) * m_bytes_per_pixel;
+  m_decoded_image_yuv.resize(size_of_decoded_image_yuv_in_bytes);
+
+  CUDAContextScope cuda_context_scope(m_cuda_context);
+  CUDA_API_CALL(cuvidCreateDecoder(&m_decoder, &video_decode_create_info), THROW_IF_ERROR);
+  return video_decode_create_info.ulNumDecodeSurfaces;
+}
+
+void NvDecoder::decode(const std::vector<uint8_t> & encoded_image_in, const int frame_id,
+    std::vector<uchar3> * decoded_image_out_ptr) {
+  if(!decoded_image_out_ptr) {
+    std::string message = "Pointer to decoded image buffer is null. There is no place for output image, break.";
+    LOG(ERROR) << message;
+    throw std::invalid_argument(message);
+  }
+  m_decoded_image_rgb_out_ptr = decoded_image_out_ptr;
+  CUVIDSOURCEDATAPACKET packet = { 0 };
+  packet.payload = encoded_image_in.data();
+  packet.payload_size = encoded_image_in.size();
+  packet.flags = CUVID_PKT_ENDOFPICTURE | CUVID_PKT_TIMESTAMP;
+  packet.timestamp = 0;
+  if (encoded_image_in.size() == 0) {
+    packet.flags |= CUVID_PKT_ENDOFSTREAM;
+  }
+  CUDA_API_CALL(cuvidParseVideoData(m_parser, &packet), THROW_IF_ERROR);
+}
+
+// 0: fail, >=1: succeeded
+int NvDecoder::HandlePictureDecode(CUVIDPICPARAMS *pic_params) {
+  CUDAContextScope cuda_context_scope(m_cuda_context);
+  CUDA_API_CALL(cuvidDecodePicture(m_decoder, pic_params), THROW_IF_ERROR);
+  return 1;
+}
+
+// 0: fail; >=1: succeeded
+int NvDecoder::HandlePictureDisplay(CUVIDPARSERDISPINFO *parser_disp_info) {
+  CUVIDPROCPARAMS video_processing_parameters = {};
+  video_processing_parameters.progressive_frame = parser_disp_info->progressive_frame;
+  video_processing_parameters.second_field = parser_disp_info->repeat_first_field + 1;
+  video_processing_parameters.top_field_first = parser_disp_info->top_field_first;
+  video_processing_parameters.unpaired_field = parser_disp_info->repeat_first_field < 0;
+  video_processing_parameters.output_stream = m_cuvid_stream;
+
+  CUdeviceptr src_frame_device_ptr = 0;
+  unsigned int src_pitch = 0;
+  CUDAContextScope cuda_context_scope(m_cuda_context);
+  CUDA_API_CALL(cuvidMapVideoFrame(m_decoder, parser_disp_info->picture_index, &src_frame_device_ptr,
+    &src_pitch, &video_processing_parameters), THROW_IF_ERROR);
+
+  CUVIDGETDECODESTATUS decode_status;
+  memset(&decode_status, 0, sizeof(decode_status));
+  CUresult result = cuvidGetDecodeStatus(m_decoder, parser_disp_info->picture_index, &decode_status);
+  if (result == CUDA_SUCCESS &&
+      (decode_status.decodeStatus == cuvidDecodeStatus_Error ||
       decode_status.decodeStatus == cuvidDecodeStatus_Error_Concealed)) {
+    LOG(INFO) << "Image decoding failed with status: " << decode_status.decodeStatus;
+  }
+  uint8_t *decoded_image_yuv_ptr = m_decoded_image_yuv.data();
+  // Copy luma plane
+  CUDA_MEMCPY2D mem_cpy_2d = { 0 
};
+  mem_cpy_2d.srcMemoryType = CU_MEMORYTYPE_DEVICE;
+  mem_cpy_2d.srcDevice = src_frame_device_ptr;
+  mem_cpy_2d.srcPitch = src_pitch;
+  mem_cpy_2d.dstMemoryType = CU_MEMORYTYPE_HOST;
+  mem_cpy_2d.dstDevice = (CUdeviceptr)(mem_cpy_2d.dstHost = decoded_image_yuv_ptr);
+  mem_cpy_2d.dstPitch = m_device_frame_pitch ? m_device_frame_pitch : m_image_width_in_pixels * m_bytes_per_pixel;
+  mem_cpy_2d.WidthInBytes = m_image_width_in_pixels * m_bytes_per_pixel;
+  mem_cpy_2d.Height = m_luma_height;
+  CUDA_API_CALL(cuMemcpy2DAsync(&mem_cpy_2d, m_cuvid_stream), THROW_IF_ERROR);
+  // Copy chroma plane
+  // NVDEC output has luma height aligned by 2. Adjust chroma offset by aligning height
+  mem_cpy_2d.srcDevice = (CUdeviceptr)((uint8_t *)src_frame_device_ptr + mem_cpy_2d.srcPitch * ((m_surface_height + 1) & ~1));
+  mem_cpy_2d.dstDevice = (CUdeviceptr)(mem_cpy_2d.dstHost = decoded_image_yuv_ptr + mem_cpy_2d.dstPitch * m_luma_height);
+  mem_cpy_2d.Height = m_chroma_height;
+  CUDA_API_CALL(cuMemcpy2DAsync(&mem_cpy_2d, m_cuvid_stream), THROW_IF_ERROR);
+
+  if (m_num_chroma_planes == 2) {
+    mem_cpy_2d.srcDevice = (CUdeviceptr)((uint8_t *)src_frame_device_ptr + mem_cpy_2d.srcPitch * ((m_surface_height + 1) & ~1) * 2);
+    mem_cpy_2d.dstDevice = (CUdeviceptr)(mem_cpy_2d.dstHost = decoded_image_yuv_ptr + mem_cpy_2d.dstPitch * m_luma_height * 2);
+    mem_cpy_2d.Height = m_chroma_height;
+    CUDA_API_CALL(cuMemcpy2DAsync(&mem_cpy_2d, m_cuvid_stream), THROW_IF_ERROR);
+  }
+  CUDA_API_CALL(cuStreamSynchronize(m_cuvid_stream), THROW_IF_ERROR);
+  CUDA_API_CALL(cuvidUnmapVideoFrame(m_decoder, src_frame_device_ptr), THROW_IF_ERROR);
+  const int num_pixels = m_image_width_in_pixels * m_luma_height;
+  if(m_decoded_image_rgb_out_ptr->size() != static_cast<size_t>(num_pixels)) {
+    m_decoded_image_rgb_out_ptr->resize(num_pixels);
+  }
+  yuv2Rgb(decoded_image_yuv_ptr, m_image_width_in_pixels, m_luma_height, m_decoded_image_rgb_out_ptr);
+  return 1;
+}
+
+}
diff --git a/intern/cycles/cluster_rendering/libcluster/compression/nv_decoder.h b/intern/cycles/cluster_rendering/libcluster/compression/nv_decoder.h
new file mode 100644
index 00000000000..801f63e83c9
--- /dev/null
+++ b/intern/cycles/cluster_rendering/libcluster/compression/nv_decoder.h
@@ -0,0 +1,81 @@
+#ifndef __NV_DECODER_H__
+#define __NV_DECODER_H__
+
+#ifdef WITH_CUDA_DYNLOAD
+  #include <cuew.h>
+  // Do not use CUDA SDK headers when using CUEW
+  // The macro below is used by Optix SDK and is necessary to avoid DSO loading collision
+  // See device_optix.cpp for example.
+  #define OPTIX_DONT_INCLUDE_CUDA
+#else
+  #include <cuda.h>
+#endif
+
+#include "nvcuvid.h"
+
+#include "../utils/logging.h"
+#include "../utils/vector_types.h" // for uchar3
+
+namespace cgr_libcluster {
+
+class NvDecoder {
+public:
+
+  NvDecoder(CUcontext cuda_context);
+  ~NvDecoder();
+
+  // Decodes input image, converts into rgb format and puts into decoded_image_out_ptr
+  // Decoder resizes output vector so it can accommodate a decoded image if needed
+  void decode(const std::vector<uint8_t> & encoded_image_in, const int frame_id,
+    std::vector<uchar3> * decoded_image_out_ptr);
+
+private:
+  const CUcontext m_cuda_context;
+
+  CUvideoparser m_parser = nullptr;
+  CUvideodecoder m_decoder = nullptr;
+  CUstream m_cuvid_stream = 0;
+
+  unsigned int m_image_width_in_pixels = 0;
+  unsigned int m_luma_height = 0;
+  unsigned int m_chroma_height = 0;
+  unsigned int m_num_chroma_planes = 0;
+  int m_bytes_per_pixel = 1;
+  size_t m_device_frame_pitch = 0;
+  int m_surface_height = 0;
+  cudaVideoSurfaceFormat m_output_format = cudaVideoSurfaceFormat_NV12;
+  std::vector<uint8_t> m_decoded_image_yuv;
+  std::vector<uchar3> * m_decoded_image_rgb_out_ptr = nullptr;
+
+  // Callback function to be registered for getting a callback when decoding of sequence starts
+  static int CUDAAPI HandleVideoSequenceCallback(void *pUserData, CUVIDEOFORMAT *pVideoFormat) {
+    return ((NvDecoder *)pUserData)->HandleVideoSequence(pVideoFormat);
+  }
+
+  // Callback function to be registered for getting a callback when a picture is ready to be decoded
+  static int CUDAAPI HandlePictureDecodeCallback(void *pUserData, CUVIDPICPARAMS *pPicParams) {
+    return ((NvDecoder *)pUserData)->HandlePictureDecode(pPicParams);
+  }
+
+  // Callback function to be registered for getting a callback when a decoded frame is available for display
+  static int CUDAAPI HandlePictureDisplayCallback(void *pUserData, CUVIDPARSERDISPINFO *pDispInfo) {
+    return ((NvDecoder *)pUserData)->HandlePictureDisplay(pDispInfo);
+  }
+
+  // This function gets called when a sequence is ready to be decoded. The function also gets called
+  // when there is a format change. It initializes the video decoder
+  int HandleVideoSequence(CUVIDEOFORMAT *pVideoFormat);
+
+  // This function gets called when a picture is ready to be decoded. cuvidDecodePicture is called from
+  // this function to decode the picture
+  int HandlePictureDecode(CUVIDPICPARAMS *pPicParams);
+
+  // This function gets called after a picture is decoded and available for display. 
Frames are fetched
+  // and stored in internal buffer
+  int HandlePictureDisplay(CUVIDPARSERDISPINFO *pDispInfo);
+
+};
+
+}
+
+#endif
diff --git a/intern/cycles/cluster_rendering/libcluster/compression/nv_encoder.cpp b/intern/cycles/cluster_rendering/libcluster/compression/nv_encoder.cpp
index f7ceff803d2..2a88ae50f78 100644
--- a/intern/cycles/cluster_rendering/libcluster/compression/nv_encoder.cpp
+++ b/intern/cycles/cluster_rendering/libcluster/compression/nv_encoder.cpp
@@ -6,6 +6,7 @@
 #include <string>
 
 #include "nv_encoder.h"
+#include "../cuda_context_provider.h"
 
 namespace cgr_libcluster {
 
@@ -24,22 +25,56 @@ NvEncoder::NvEncoder(NV_ENC_BUFFER_FORMAT buffer_format,
 }
 
 NvEncoder::~NvEncoder() {
+  VLOG(3) << "NvEncoder destructor";
   if(m_encoder_session_handle.get()) {
-    nvencode_api_function_list.nvEncDestroyBitstreamBuffer(m_encoder_session_handle.get(), m_bitstream_output_buffer.get());
-    nvencode_api_function_list.nvEncUnregisterResource(m_encoder_session_handle.get(), m_input_buffer_registration.get());
-    nvencode_api_function_list.nvEncDestroyEncoder(m_encoder_session_handle.get());
+    NV_ENC_PIC_PARAMS end_of_input_stream_pic_params = {NV_ENC_PIC_PARAMS_VER};
+    end_of_input_stream_pic_params.encodePicFlags = NV_ENC_PIC_FLAG_EOS;
+    NVENCSTATUS nv_error_code = m_nvencode_api_function_list.nvEncEncodePicture(
+      m_encoder_session_handle.get(), &end_of_input_stream_pic_params);
+    if(nv_error_code != NV_ENC_SUCCESS) {
+      std::string message = "FATAL. nvEncEncodePicture call with end of stream failed with error: "
+        + std::to_string(nv_error_code);
+      VLOG(1) << message;
+    } else {
+      VLOG(3) << "Communicated end of stream to the NvEncoder";
+    }
+
+    nv_error_code = m_nvencode_api_function_list.nvEncUnregisterResource(m_encoder_session_handle.get(),
+      m_input_buffer_registration.get());
+    if(nv_error_code != NV_ENC_SUCCESS) {
+      std::string message = "FATAL. NvEncoder input buffer un-registration failed with error: "
+        + std::to_string(nv_error_code);
+      VLOG(1) << message;
+    } else {
+      VLOG(3) << "Unregistered NvEncoder input buffer successfully";
+    }
+
+    nv_error_code = m_nvencode_api_function_list.nvEncDestroyBitstreamBuffer(m_encoder_session_handle.get(),
+      m_bitstream_output_buffer.get());
+    if(nv_error_code != NV_ENC_SUCCESS) {
+      std::string message = "FATAL. 
nvEncDestroyBitstreamBuffer failed with error: " + + std::to_string(nv_error_code); + VLOG(1) << message; + } else { + VLOG(3) << "Destroyed NvEncoder bitstream buffer successfully"; + } + m_nvencode_api_function_list.nvEncDestroyEncoder(m_encoder_session_handle.get()); + VLOG(3) << "Destroyed NvEncoder session successfully"; } + if(m_cuda_context) { - cuCtxPushCurrent(m_cuda_context); + CUDAContextScope cuda_context_scope(m_cuda_context); cuMemFree(m_cuda_device_ptr.get()); - cuCtxPopCurrent(NULL); + VLOG(3) << "Released cuda input buffer successfully"; } + VLOG(3) << "NvEncoder released resources and is destroyed"; } void NvEncoder::initNvEncoder() { // most of values that are currently set here are taken from the NVIDIA samples NvEncoder.cpp // we can adjust them later on if needed + VLOG(3) << "Initializing NVENCODER for width: " << m_width << " height: " << m_height; uint32_t driver_version = 0; uint32_t header_version = (NVENCAPI_MAJOR_VERSION << 4) | NVENCAPI_MINOR_VERSION; NVENCSTATUS nv_error_code_v = NvEncodeAPIGetMaxSupportedVersion(&driver_version); @@ -56,15 +91,15 @@ void NvEncoder::initNvEncoder() { VLOG(1) << message; throw std::runtime_error(message); } - nvencode_api_function_list = { NV_ENCODE_API_FUNCTION_LIST_VER }; - NVENCSTATUS nv_error_code = NvEncodeAPICreateInstance(&nvencode_api_function_list); + m_nvencode_api_function_list = { NV_ENCODE_API_FUNCTION_LIST_VER }; + NVENCSTATUS nv_error_code = NvEncodeAPICreateInstance(&m_nvencode_api_function_list); if(nv_error_code != NV_ENC_SUCCESS) { std::string message = "FATAL. NvEncodeAPICreateInstance failed with error: " + std::to_string(nv_error_code); VLOG(1) << message; throw std::runtime_error(message); } - if(!nvencode_api_function_list.nvEncOpenEncodeSessionEx) { + if(!m_nvencode_api_function_list.nvEncOpenEncodeSessionEx) { std::string message = "FATAL. nvEncOpenEncodeSessionEx API not found"; VLOG(1) << message; throw std::runtime_error(message); @@ -74,13 +109,13 @@ void NvEncoder::initNvEncoder() { encode_session_ex_params.deviceType = NV_ENC_DEVICE_TYPE_CUDA; encode_session_ex_params.apiVersion = NVENCAPI_VERSION; void * encoder_session_handle; - nv_error_code = nvencode_api_function_list.nvEncOpenEncodeSessionEx(&encode_session_ex_params, &encoder_session_handle); + nv_error_code = m_nvencode_api_function_list.nvEncOpenEncodeSessionEx(&encode_session_ex_params, &encoder_session_handle); m_encoder_session_handle.init(encoder_session_handle, __FILE__, __FUNCTION__, __LINE__); if(nv_error_code != NV_ENC_SUCCESS) { std::string message = "FATAL. 
nvEncOpenEncodeSessionEx failed with error: " + std::to_string(nv_error_code); VLOG(1) << message; - nvencode_api_function_list.nvEncDestroyEncoder(m_encoder_session_handle.get()); + m_nvencode_api_function_list.nvEncDestroyEncoder(m_encoder_session_handle.get()); throw std::runtime_error(message); } if(!m_encoder_session_handle.get()) { @@ -92,66 +127,66 @@ void NvEncoder::initNvEncoder() { // extern/nvidia/Video_Codec_SDK_11.1.5/Interface/nvEncodeAPI.h // github link: // https://ghe.oculus-rep.com/FRL-Graphics-Research/distributed_blender_cycles/blob/cluster_blender_32_main_nvenc/extern/nvidia/Video_Codec_SDK_11.1.5/Interface/nvEncodeAPI.h#L1667 - NV_ENC_INITIALIZE_PARAMS encoder_init_params = { 0 }; - encoder_init_params.version = NV_ENC_INITIALIZE_PARAMS_VER; - GUID preset = NV_ENC_PRESET_P1_GUID; - encoder_init_params.encodeGUID = NV_ENC_CODEC_H264_GUID; - encoder_init_params.presetGUID = preset; - encoder_init_params.encodeWidth = m_width; - encoder_init_params.encodeHeight = m_height; - encoder_init_params.darWidth = m_width; - encoder_init_params.darHeight = m_height; - encoder_init_params.frameRateNum = 30; - encoder_init_params.frameRateDen = 1; - encoder_init_params.enablePTD = 1; - encoder_init_params.reportSliceOffsets = 0; - encoder_init_params.enableSubFrameWrite = 0; - encoder_init_params.maxEncodeWidth = m_width; - encoder_init_params.maxEncodeHeight = m_height; - encoder_init_params.enableMEOnlyMode = false; - encoder_init_params.enableOutputInVidmem = false; - encoder_init_params.tuningInfo = NV_ENC_TUNING_INFO_LOW_LATENCY; - //encoder_init_params.tuningInfo = NV_ENC_TUNING_INFO_HIGH_QUALITY; + m_encoder_init_params = { 0 }; + m_encoder_init_params.version = NV_ENC_INITIALIZE_PARAMS_VER; + GUID preset_guid = NV_ENC_PRESET_P1_GUID; + m_encoder_init_params.encodeGUID = NV_ENC_CODEC_H264_GUID; + m_encoder_init_params.presetGUID = preset_guid; + m_encoder_init_params.encodeWidth = m_width; + m_encoder_init_params.encodeHeight = m_height; + m_encoder_init_params.darWidth = m_width; + m_encoder_init_params.darHeight = m_height; + m_encoder_init_params.frameRateNum = 30; + m_encoder_init_params.frameRateDen = 1; + m_encoder_init_params.enablePTD = 1; + m_encoder_init_params.reportSliceOffsets = 0; + m_encoder_init_params.enableSubFrameWrite = 0; + m_encoder_init_params.maxEncodeWidth = m_width; + m_encoder_init_params.maxEncodeHeight = m_height; + m_encoder_init_params.enableMEOnlyMode = false; + m_encoder_init_params.enableOutputInVidmem = false; + m_encoder_init_params.tuningInfo = NV_ENC_TUNING_INFO_LOW_LATENCY; + //m_encoder_init_params.tuningInfo = NV_ENC_TUNING_INFO_HIGH_QUALITY; - NV_ENC_CONFIG encode_config = { 0 }; - encoder_init_params.encodeConfig = &encode_config; - NV_ENC_PRESET_CONFIG presetConfig_2 = { NV_ENC_PRESET_CONFIG_VER, { NV_ENC_CONFIG_VER } }; - nvencode_api_function_list.nvEncGetEncodePresetConfigEx(m_encoder_session_handle.get(), - NV_ENC_CODEC_H264_GUID, preset, encoder_init_params.tuningInfo, &presetConfig_2); - memcpy(encoder_init_params.encodeConfig, &presetConfig_2.presetCfg, sizeof(NV_ENC_CONFIG)); + m_encode_config = { 0 }; + m_encoder_init_params.encodeConfig = &m_encode_config; + NV_ENC_PRESET_CONFIG preset_config_ex = { NV_ENC_PRESET_CONFIG_VER, { NV_ENC_CONFIG_VER } }; + m_nvencode_api_function_list.nvEncGetEncodePresetConfigEx(m_encoder_session_handle.get(), + m_encoder_init_params.encodeGUID, preset_guid, m_encoder_init_params.tuningInfo, &preset_config_ex); + memcpy(m_encoder_init_params.encodeConfig, &preset_config_ex.presetCfg, 
sizeof(NV_ENC_CONFIG)); - encode_config.version = NV_ENC_CONFIG_VER; - encode_config.frameIntervalP = 1; - encode_config.gopLength = NVENC_INFINITE_GOPLENGTH; - encode_config.encodeCodecConfig.h264Config.idrPeriod = encode_config.gopLength; - encode_config.rcParams.rateControlMode = NV_ENC_PARAMS_RC_CBR; - encode_config.rcParams.multiPass = NV_ENC_TWO_PASS_FULL_RESOLUTION; + m_encode_config.frameIntervalP = 1; + m_encode_config.gopLength = NVENC_INFINITE_GOPLENGTH; + m_encode_config.encodeCodecConfig.h264Config.idrPeriod = m_encode_config.gopLength; + m_encode_config.rcParams.rateControlMode = NV_ENC_PARAMS_RC_CBR; + m_encode_config.rcParams.multiPass = NV_ENC_TWO_PASS_FULL_RESOLUTION; // This produces images with acceptable quality. Tested on Tesla and Trudy scenes on flat screen. // We may adjust the value of the bitrate based on the results of further testing. - encode_config.rcParams.averageBitRate = 2800000; + m_encode_config.rcParams.averageBitRate = 2800000; // Below is the bitrate calculation from the NVIDIA samples app. It's too low for us. // Keep it here for now for reference. Remove once bitrate calculation is finalised. - // encode_config.rcParams.averageBitRate = (static_cast<uint32_t>( - // 5.0f * encoder_init_params.encodeWidth * encoder_init_params.encodeHeight) / + // m_encode_config.rcParams.averageBitRate = (static_cast<uint32_t>( + // 5.0f * m_encoder_init_params.encodeWidth * m_encoder_init_params.encodeHeight) / // (width * height)) * 100000; - VLOG(3) << "Average bitrate: " << encode_config.rcParams.averageBitRate; - encode_config.rcParams.vbvBufferSize = (encode_config.rcParams.averageBitRate * encoder_init_params.frameRateDen / - encoder_init_params.frameRateNum) * 5; - encode_config.rcParams.maxBitRate = encode_config.rcParams.averageBitRate * 2; - encode_config.rcParams.vbvInitialDelay = encode_config.rcParams.vbvBufferSize; + VLOG(3) << "Average bitrate: " << m_encode_config.rcParams.averageBitRate; + m_encode_config.rcParams.vbvBufferSize = (m_encode_config.rcParams.averageBitRate * m_encoder_init_params.frameRateDen / + m_encoder_init_params.frameRateNum) * 5; + m_encode_config.rcParams.maxBitRate = m_encode_config.rcParams.averageBitRate * 2; + m_encode_config.rcParams.vbvInitialDelay = m_encode_config.rcParams.vbvBufferSize; - nv_error_code = nvencode_api_function_list.nvEncInitializeEncoder(m_encoder_session_handle.get(), &encoder_init_params); + nv_error_code = m_nvencode_api_function_list.nvEncInitializeEncoder(m_encoder_session_handle.get(), &m_encoder_init_params); if(nv_error_code != NV_ENC_SUCCESS) { std::string message = "FATAL. nvEncInitializeEncoder failed with error: " + std::to_string(nv_error_code); VLOG(1) << message; throw std::runtime_error(message); } + VLOG(3) << "NvEncoder initialization completed"; } void NvEncoder::allocateOutputBuffer() { NV_ENC_CREATE_BITSTREAM_BUFFER create_bitstream_buffer_struct = { NV_ENC_CREATE_BITSTREAM_BUFFER_VER }; - NVENCSTATUS nv_error_code = nvencode_api_function_list.nvEncCreateBitstreamBuffer(m_encoder_session_handle.get(), + NVENCSTATUS nv_error_code = m_nvencode_api_function_list.nvEncCreateBitstreamBuffer(m_encoder_session_handle.get(), &create_bitstream_buffer_struct); if(nv_error_code != NV_ENC_SUCCESS) { std::string message = "FATAL. 
nvEncCreateBitstreamBuffer failed with error: " @@ -165,14 +200,7 @@ void NvEncoder::allocateOutputBuffer() { } void NvEncoder::allocateInputBuffer() { - CUresult cu_result = cuCtxPushCurrent(m_cuda_context); - if(cu_result != CUDA_SUCCESS) { - const char *error_name = NULL; - cuGetErrorName(cu_result, &error_name); - std::string message = "FATAL. cuCtxPushCurrent failed with error: " + std::string(error_name); - VLOG(1) << message; - throw std::runtime_error(message); - } + CUDAContextScope cuda_context_scope(m_cuda_context); const int chroma_height = getChromaHeight(m_buffer_format, m_height); const int height_in_rows = m_height + chroma_height; const int width_in_bytes = getWidthInBytes(m_buffer_format, m_width); @@ -180,7 +208,7 @@ CUdeviceptr cuda_device_ptr; VLOG(3) << "Allocating input buffer with cuMemAllocPitch cuda_pitch: " << m_cuda_pitch.get() << " width_in_bytes: " << width_in_bytes << " height in rows: " << height_in_rows; - cu_result = cuMemAllocPitch((CUdeviceptr *)&cuda_device_ptr, + CUresult cu_result = cuMemAllocPitch((CUdeviceptr *)&cuda_device_ptr, &cuda_pitch, width_in_bytes, height_in_rows, @@ -195,14 +223,6 @@ m_cuda_pitch.init(cuda_pitch, __FILE__, __FUNCTION__, __LINE__); m_cuda_device_ptr.init(cuda_device_ptr, __FILE__, __FUNCTION__, __LINE__); VLOG(3) << "Successfully allocated input buffer with cuMemAllocPitch"; - cu_result = cuCtxPopCurrent(NULL); - if(cu_result != CUDA_SUCCESS) { - const char *error_name = NULL; - cuGetErrorName(cu_result, &error_name); - std::string message = "FATAL. cuCtxPopCurrent failed with error: " + std::string(error_name); - VLOG(1) << message; - throw std::runtime_error(message); - } VLOG(3) << "Input CUDA buffer allocation completed"; registerInputResources(cuda_device_ptr); } @@ -221,7 +241,7 @@ void NvEncoder::registerInputResources(CUdeviceptr cuda_device_ptr) { register_resource_struct.pOutputFencePoint = nullptr; VLOG(3) << "nvEncRegisterResource with width: " << register_resource_struct.width << " height: " << register_resource_struct.height << " pitch: " << register_resource_struct.pitch; - NVENCSTATUS nv_error_code = nvencode_api_function_list.nvEncRegisterResource(m_encoder_session_handle.get(), + NVENCSTATUS nv_error_code = m_nvencode_api_function_list.nvEncRegisterResource(m_encoder_session_handle.get(), &register_resource_struct); if(nv_error_code != NV_ENC_SUCCESS) { std::string message = "FATAL. nvEncRegisterResource failed with error: " @@ -245,41 +265,56 @@ void NvEncoder::registerInputResources(CUdeviceptr cuda_device_ptr) { VLOG(3) << "Successfully registered input buffer resource"; } -bool NvEncoder::getEncodedBuffer(NV_ENC_MAP_INPUT_RESOURCE &map_input_resource, AutoEnlargingBuffer<uint8_t> &encoded_buffer_out, +bool NvEncoder::getEncodedBuffer(NV_ENC_MAP_INPUT_RESOURCE &map_input_resource, std::vector<uint8_t> &encoded_buffer_out, high_resolution_clock::time_point & encoding_done) const { NV_ENC_LOCK_BITSTREAM lock_bitstream_data = { NV_ENC_LOCK_BITSTREAM_VER }; lock_bitstream_data.outputBitstream = m_bitstream_output_buffer.get(); lock_bitstream_data.doNotWait = false; - NVENCSTATUS nv_error_code = nvencode_api_function_list.nvEncLockBitstream(m_encoder_session_handle.get(), &lock_bitstream_data); + NVENCSTATUS nv_error_code = m_nvencode_api_function_list.nvEncLockBitstream(m_encoder_session_handle.get(), &lock_bitstream_data); if(nv_error_code != NV_ENC_SUCCESS) { VLOG(1) << "ERROR. 
nvEncLockBitstream failed with error: " << nv_error_code; return false; } encoding_done = high_resolution_clock::now(); uint8_t * encoded_data_ptr = (uint8_t *)lock_bitstream_data.bitstreamBufferPtr; - encoded_buffer_out.insert(&encoded_data_ptr[0], lock_bitstream_data.bitstreamSizeInBytes); - nv_error_code = nvencode_api_function_list.nvEncUnlockBitstream(m_encoder_session_handle.get(), lock_bitstream_data.outputBitstream); + encoded_buffer_out.assign(encoded_data_ptr, encoded_data_ptr + lock_bitstream_data.bitstreamSizeInBytes); + nv_error_code = m_nvencode_api_function_list.nvEncUnlockBitstream(m_encoder_session_handle.get(), lock_bitstream_data.outputBitstream); if(nv_error_code != NV_ENC_SUCCESS) { VLOG(1) << "ERROR. nvEncUnlockBitstream failed with error: " << nv_error_code; return false; } - nv_error_code = nvencode_api_function_list.nvEncUnmapInputResource(m_encoder_session_handle.get(), map_input_resource.mappedResource); + nv_error_code = m_nvencode_api_function_list.nvEncUnmapInputResource(m_encoder_session_handle.get(), map_input_resource.mappedResource); if(nv_error_code != NV_ENC_SUCCESS) { VLOG(1) << "ERROR. nvEncUnmapInputResource failed with error: " << nv_error_code; return false; } std::chrono::duration<double> copy_to_cpu_time_sec = high_resolution_clock::now() - encoding_done; - VLOG(3) << "Time to copy encoded image buffer to CPU memory: " << copy_to_cpu_time_sec.count() << " sec"; + VLOG(3) << "Time to copy encoded image buffer to CPU memory: " << copy_to_cpu_time_sec.count() << + " sec. Size: " << encoded_buffer_out.size(); return true; } -bool NvEncoder::encode(const uint8_t* input_buffer_on_host, AutoEnlargingBuffer<uint8_t> &encoded_buffer_out) const { +// Do explicit template instantiation for the encode() and +// copyInputBufferIntoGpuMappedMemory() methods (see below) for the following reasons: +// we only support buffers in CPU/host memory as uint8_t* and buffers in GPU/device memory as CUdeviceptr, +// so instantiate explicitly only for these types. This also allows us to keep the template +// method implementations in the .cpp file instead of the .h to keep the header clean. + +// Explicit template instantiation of encode() for input buffers in CPU memory as uint8_t* +template bool NvEncoder::encode(const uint8_t*, std::vector<uint8_t> &) const; + +// Explicit template instantiation of encode() for input buffers in GPU memory as CUdeviceptr +template bool NvEncoder::encode(const CUdeviceptr, std::vector<uint8_t> &) const; + +template <typename T> +bool NvEncoder::encode(const T input_buffer, std::vector<uint8_t> &encoded_buffer_out) const { + CUDAContextScope cuda_context_scope(m_cuda_context); if(m_encoder_session_handle.get() == nullptr) { VLOG(1) << "ERROR. encoder_session_handle is null. 
Encoder is not initialised or initialization failed."; return false; } NV_ENC_MAP_INPUT_RESOURCE map_input_resource = { NV_ENC_MAP_INPUT_RESOURCE_VER }; - if(!copyInputBufferToGpu(input_buffer_on_host, map_input_resource)) { + if(!copyInputBufferIntoGpuMappedMemory(input_buffer, map_input_resource)) { return false; } auto start_encoding = high_resolution_clock::now(); @@ -305,24 +340,31 @@ bool NvEncoder::encode(const uint8_t* input_buffer_on_host, AutoEnlargingBuffer< } const NVENCSTATUS NvEncoder::startEncoding(NV_ENC_MAP_INPUT_RESOURCE & map_input_resource) const { - NV_ENC_PIC_PARAMS picParams = {}; - picParams.version = NV_ENC_PIC_PARAMS_VER; - picParams.pictureStruct = NV_ENC_PIC_STRUCT_FRAME; - picParams.inputBuffer = map_input_resource.mappedResource; - picParams.bufferFmt = m_buffer_format; - picParams.inputWidth = m_width; - picParams.inputHeight = m_height; - picParams.outputBitstream = m_bitstream_output_buffer.get(); - picParams.completionEvent = nullptr; - return nvencode_api_function_list.nvEncEncodePicture(m_encoder_session_handle.get(), &picParams); + NV_ENC_PIC_PARAMS pic_params = {}; + pic_params.version = NV_ENC_PIC_PARAMS_VER; + pic_params.pictureStruct = NV_ENC_PIC_STRUCT_FRAME; + pic_params.inputBuffer = map_input_resource.mappedResource; + pic_params.bufferFmt = m_buffer_format; + pic_params.inputWidth = m_width; + pic_params.inputHeight = m_height; + pic_params.outputBitstream = m_bitstream_output_buffer.get(); + pic_params.completionEvent = nullptr; + return m_nvencode_api_function_list.nvEncEncodePicture(m_encoder_session_handle.get(), &pic_params); } -bool NvEncoder::copyInputBufferToGpu(const uint8_t* input_buffer_on_host, NV_ENC_MAP_INPUT_RESOURCE &map_input_resource) const { +// Explicit template instantiation of copyInputBufferIntoGpuMappedMemory() for input buffers in CPU memory as uint8_t* +template bool NvEncoder::copyInputBufferIntoGpuMappedMemory(const uint8_t*, NV_ENC_MAP_INPUT_RESOURCE &) const; + +// Explicit template instantiation of copyInputBufferIntoGpuMappedMemory() for input buffers in GPU memory as CUdeviceptr +template bool NvEncoder::copyInputBufferIntoGpuMappedMemory(const CUdeviceptr, NV_ENC_MAP_INPUT_RESOURCE &) const; + +template <typename T> +bool NvEncoder::copyInputBufferIntoGpuMappedMemory(T input_buffer, + NV_ENC_MAP_INPUT_RESOURCE &map_input_resource) const { auto start_copy_to_gpu = high_resolution_clock::now(); const int width_in_bytes = getWidthInBytes(m_buffer_format, m_width); CUDA_MEMCPY2D memcpy2d = { 0 }; - memcpy2d.srcMemoryType = CU_MEMORYTYPE_HOST; - memcpy2d.srcHost = input_buffer_on_host; + setSourceBuffer(input_buffer, memcpy2d); memcpy2d.srcPitch = getWidthInBytes(m_buffer_format, m_width); memcpy2d.dstMemoryType = CU_MEMORYTYPE_DEVICE; memcpy2d.dstDevice = m_cuda_device_ptr.get(); @@ -331,7 +373,7 @@ bool NvEncoder::copyInputBufferToGpu(const uint8_t* input_buffer_on_host, NV_ENC memcpy2d.Height = m_height; cuMemcpy2D(&memcpy2d); for (int i = 0; i < m_src_chroma_offsets.get().size(); ++i) { - memcpy2d.srcHost = (input_buffer_on_host + m_src_chroma_offsets.get()[i]); + setSourceBuffer(input_buffer + m_src_chroma_offsets.get()[i], memcpy2d); memcpy2d.dstDevice = (CUdeviceptr)((uint8_t *)m_cuda_device_ptr.get() + m_dst_chroma_offsets.get()[i]); memcpy2d.srcPitch = getChromaPitch(m_buffer_format, memcpy2d.srcPitch); memcpy2d.dstPitch = getChromaPitch(m_buffer_format, memcpy2d.dstPitch); @@ -340,7 +382,7 @@ bool NvEncoder::copyInputBufferToGpu(const uint8_t* input_buffer_on_host, NV_ENC cuMemcpy2D(&memcpy2d); } 
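+ // Illustration of the assumed plane layout (the offset values below are an assumption for this + // sketch, not taken from this change): for NV12 input the loop above issues one extra copy for + // the interleaved UV plane, with m_src_chroma_offsets holding { width_in_bytes * m_height } for + // the tightly packed source buffer and m_dst_chroma_offsets holding { m_cuda_pitch * m_height } + // for the pitched destination, i.e. the chroma rows start right after the luma rows. 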
map_input_resource.registeredResource = m_input_buffer_registration.get(); - NVENCSTATUS nv_error_code = nvencode_api_function_list.nvEncMapInputResource(m_encoder_session_handle.get(), &map_input_resource); + NVENCSTATUS nv_error_code = m_nvencode_api_function_list.nvEncMapInputResource(m_encoder_session_handle.get(), &map_input_resource); if(nv_error_code != NV_ENC_SUCCESS) { VLOG(3) << "ERROR. nvEncMapInputResource failed with error: " << std::to_string(nv_error_code); return false; @@ -350,6 +392,16 @@ bool NvEncoder::copyInputBufferToGpu(const uint8_t* input_buffer_on_host, NV_ENC return true; } +void NvEncoder::setSourceBuffer(const uint8_t * input_buffer, CUDA_MEMCPY2D &memcpy2d) const { + memcpy2d.srcHost = input_buffer; + memcpy2d.srcMemoryType = CU_MEMORYTYPE_HOST; +} + +void NvEncoder::setSourceBuffer(const CUdeviceptr input_buffer, CUDA_MEMCPY2D &memcpy2d) const { + memcpy2d.srcDevice = input_buffer; + memcpy2d.srcMemoryType = CU_MEMORYTYPE_DEVICE; +} + uint32_t NvEncoder::getWidthInBytes(const NV_ENC_BUFFER_FORMAT buffer_format, const uint32_t width) const { switch (buffer_format) { case NV_ENC_BUFFER_FORMAT_NV12: diff --git a/intern/cycles/cluster_rendering/libcluster/compression/nv_encoder.h b/intern/cycles/cluster_rendering/libcluster/compression/nv_encoder.h index 98849e249e9..84786dc909f 100644 --- a/intern/cycles/cluster_rendering/libcluster/compression/nv_encoder.h +++ b/intern/cycles/cluster_rendering/libcluster/compression/nv_encoder.h @@ -1,76 +1,36 @@ #ifndef __NV_ENCODER_H__ #define __NV_ENCODER_H__ +#include <vector> #include <chrono> -//#include -# ifdef WITH_CUDA_DYNLOAD -# include "cuew.h" -# else -# include -# include -# endif + +#ifdef WITH_CUDA_DYNLOAD + #include <cuew.h> + // Do not use CUDA SDK headers when using CUEW + // The macro below is used by Optix SDK and is necessary to avoid DSO loading collision + // See device_optix.cpp for example. + #define OPTIX_DONT_INCLUDE_CUDA +#else + #include <cuda.h> +#endif #include "final.h" #include "nvEncodeAPI.h" +#include "../utils/logging.h" namespace cgr_libcluster { using namespace std::chrono; -// This class holds preallocated buffer with requested initial size. -// It enlarges the buffer automatically if it cannot accommodate incoming data -// Buffer does not shrink, it only enlarges when needed to reduce amount of memory re-allocations. 
-template <typename T> -class AutoEnlargingBuffer { -public: - AutoEnlargingBuffer(size_t init_size){ - if(init_size > 0) { - resize(init_size); - } - } - - ~AutoEnlargingBuffer(){ - delete [] data_buffer; - } - - void insert(const T* input_data, const size_t input_data_size) { - if(input_data_size > m_capacity) { - VLOG(3) << "Current capacity of the AutoEnlargingBuffer: " << m_capacity << - " Reallocating to support larger data size: " << input_data_size; - resize(input_data_size); - } - memcpy(data_buffer, input_data, input_data_size); - m_data_size = input_data_size; - } - - T* data() const { - return data_buffer; - } - - size_t size() const { - return m_data_size; - } - -private: - T* data_buffer = nullptr; - size_t m_data_size = 0; - size_t m_capacity = 0; - - void resize(const size_t new_buffer_size) { - delete [] data_buffer; - data_buffer = new T[new_buffer_size]; - m_capacity = new_buffer_size; - m_data_size = 0; - } -}; - class NvEncoder { public: - static const NV_ENC_BUFFER_FORMAT BUFFER_FORMAT_FOR_IMAGES_FROM_MASTER = NV_ENC_BUFFER_FORMAT_ABGR; NvEncoder(NV_ENC_BUFFER_FORMAT buffer_format, CUcontext cuda_context, const int width, const int height); ~NvEncoder(); - bool encode(const uint8_t* input_buffer_on_host, AutoEnlargingBuffer<uint8_t> &encoded_buffer_out) const; + + // Encodes an image within the input_buffer + template <typename T> + bool encode(const T input_buffer, std::vector<uint8_t> &encoded_buffer_out) const; private: static constexpr int DEVICE_NUM = 0; @@ -80,7 +40,9 @@ private: const int m_width; const int m_height; const NV_ENC_BUFFER_FORMAT m_buffer_format; - NV_ENCODE_API_FUNCTION_LIST nvencode_api_function_list; + NV_ENC_INITIALIZE_PARAMS m_encoder_init_params; + NV_ENC_CONFIG m_encode_config; + NV_ENCODE_API_FUNCTION_LIST m_nvencode_api_function_list; Final<void *> m_encoder_session_handle; Final<size_t> m_cuda_pitch; Final<CUdeviceptr> m_cuda_device_ptr; @@ -111,11 +73,17 @@ private: // Registers preallocated input CUDA buffer for the NvEncoder with nvEncRegisterResource void registerInputResources(CUdeviceptr cuda_device_ptr); - // Copies input image buffer (on CPU) to the preallocated and registered encoder input buffer on GPU (CUDA device) - bool copyInputBufferToGpu(const uint8_t* input_buffer_on_host, NV_ENC_MAP_INPUT_RESOURCE &map_input_resource) const; + // Copies input image buffer (in CPU or GPU memory) to the preallocated and registered + // encoder input buffer on GPU (CUDA device) + template <typename T> + bool copyInputBufferIntoGpuMappedMemory(T input_buffer, NV_ENC_MAP_INPUT_RESOURCE &map_input_resource) const; + void setSourceBuffer(const uint8_t * input_buffer, CUDA_MEMCPY2D &memcpy2d) const; + void setSourceBuffer(const CUdeviceptr input_buffer, CUDA_MEMCPY2D &memcpy2d) const; + // Copies encoded image from the encoder to the preallocated output buffer on CPU - bool getEncodedBuffer(NV_ENC_MAP_INPUT_RESOURCE &map_input_resource, AutoEnlargingBuffer<uint8_t> &encoded_buffer_out, + bool getEncodedBuffer(NV_ENC_MAP_INPUT_RESOURCE &map_input_resource, + std::vector<uint8_t> &encoded_buffer_out, high_resolution_clock::time_point & encoding_done) const; // Starts image encoding diff --git a/intern/cycles/cluster_rendering/libcluster/compression/turbojpeg_compressor.cpp b/intern/cycles/cluster_rendering/libcluster/compression/turbojpeg_compressor.cpp new file mode 100644 index 00000000000..2b53b676cfb --- /dev/null +++ b/intern/cycles/cluster_rendering/libcluster/compression/turbojpeg_compressor.cpp @@ -0,0 +1,124 @@ + +//#include "./utils/timer.h" // for scoped_timer +#include "../utils/logging.h" +#include "turbojpeg_compressor.h" + +namespace 
cgr_libcluster { + +TurbojpegCompressor::TurbojpegCompressor() { + m_jpeg_compressor = tjInitCompress(); + m_jpeg_decompressor = tjInitDecompress(); +} + +TurbojpegCompressor::~TurbojpegCompressor() { + if (m_jpeg_compressor != nullptr) { + tjDestroy(m_jpeg_compressor); + } + if (m_jpeg_decompressor != nullptr) { + tjDestroy(m_jpeg_decompressor); + } +} + +//#define TIME_JPEG +size_t TurbojpegCompressor::compress(void* src_buffer, int width, int height, int compression_quality, + unsigned char*& jpeg_image) { + // The source buffer is treated as packed 3-channel RGB (unsigned char) + const int subsampling = TJSAMP_444; + size_t jpeg_length = 0; // tjCompress2 will allocate the jpeg_image buffer + jpeg_image = nullptr; + +#ifdef TIME_JPEG + struct timespec start_time, end_time; + clock_gettime(CLOCK_MONOTONIC, &start_time); +#endif + if (m_jpeg_compressor == nullptr) { + LOG(ERROR) << "JPEG compressor is not initialized"; + return 0; + } + int jpeg_error = tjCompress2(m_jpeg_compressor, + (unsigned char*) src_buffer, + width, + 0, + height, + TJPF_RGB, + &jpeg_image, + (unsigned long *)&jpeg_length, + subsampling, + compression_quality, + TJFLAG_FASTDCT); + if (jpeg_error < 0) { + const char *jpeg_error_str = tjGetErrorStr(); + LOG(ERROR) << "JPEG compression error: " << jpeg_error_str; + return 0; + } + +#ifdef TIME_JPEG + clock_gettime(CLOCK_MONOTONIC, &end_time); + // ms time + double elapsed_time = (end_time.tv_sec - start_time.tv_sec) * 1e3 + (end_time.tv_nsec - start_time.tv_nsec) / 1e6; + LOG(INFO) << "TIMING: JPEG compression: " << elapsed_time << "ms" + << ", resolution " << width << "x" << height + << ", sizes " << (width * height * 3) << " (" << jpeg_length << ")"; +#endif + return jpeg_length; +} + +bool TurbojpegCompressor::decompress(std::vector<unsigned char> & jpeg_image_buffer, int & width_out, int & height_out, + std::vector<uchar3> & decompressed_image_out) { +#ifdef TIME_JPEG + struct timespec start_time, end_time; + clock_gettime(CLOCK_MONOTONIC, &start_time); +#endif + // Use TurboJPEG to decompress the buffer + int subsampling = 0; + if (m_jpeg_decompressor == nullptr) { + LOG(ERROR) << "JPEG decompressor is not initialized"; + return false; + } + int width = 0, height = 0; + int jpeg_error = tjDecompressHeader2(m_jpeg_decompressor, jpeg_image_buffer.data(), + jpeg_image_buffer.size(), &width, &height, &subsampling); + if (jpeg_error < 0) { + LOG(ERROR) << "Cannot decode JPEG header from incoming image buffer"; + return false; + } + width_out = width; + height_out = height; + const size_t num_pixels = width_out * height_out; + if(decompressed_image_out.size() != num_pixels) { + decompressed_image_out.resize(num_pixels); + } + jpeg_error = tjDecompress2(m_jpeg_decompressor, + jpeg_image_buffer.data(), + jpeg_image_buffer.size(), + (unsigned char*) decompressed_image_out.data(), + width_out, + 0, + height_out, + TJPF_RGB, + TJFLAG_ACCURATEDCT); 
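+ // TJFLAG_ACCURATEDCT favours accuracy over speed on the decode path, while compress() + // above opts for TJFLAG_FASTDCT; both flags are part of the TurboJPEG API. 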
+ if (jpeg_error < 0) { + const char *jpeg_error_str = tjGetErrorStr(); + LOG(ERROR) << "JPEG decompression error: " << jpeg_error_str; + return false; + } + +#ifdef TIME_JPEG + clock_gettime(CLOCK_MONOTONIC, &end_time); + // ms time + double elapsed_time = (end_time.tv_sec - start_time.tv_sec) * 1e3 + (end_time.tv_nsec - start_time.tv_nsec) / 1e6; + LOG(INFO) << "TIMING: JPEG decompression: " << elapsed_time << "ms" + << ", resolution " << width_out << "x" << height_out + << ", sizes " << jpeg_image_buffer.size() << " (" << decompressed_image_out.size() << ")"; +#endif + + return true; +} + +void TurbojpegCompressor::free(uint8_t *memory) { + tjFree(memory); +} + + +} // cgr_libcluster diff --git a/intern/cycles/cluster_rendering/libcluster/compression/turbojpeg_compressor.h b/intern/cycles/cluster_rendering/libcluster/compression/turbojpeg_compressor.h new file mode 100644 index 00000000000..a3ef6761e2d --- /dev/null +++ b/intern/cycles/cluster_rendering/libcluster/compression/turbojpeg_compressor.h @@ -0,0 +1,34 @@ +#ifndef __TURBOJPEG_COMPRESSOR_H__ +#define __TURBOJPEG_COMPRESSOR_H__ + +#include <vector> + +#include <turbojpeg.h> + +#include "../utils/vector_types.h" // for uchar3 + +//#include "net_camera.h" + +namespace cgr_libcluster { + +class TurbojpegCompressor { + public: + TurbojpegCompressor(); + ~TurbojpegCompressor(); + + // Compress the image buffer into a jpeg image + size_t compress(void* src_buffer, int width, int height, int compression_quality, unsigned char*& jpeg_image); + // Decompress a jpeg stream buffer into raw RGB pixels + bool decompress(std::vector<unsigned char> & jpeg_image_buffer, int & width_out, int & height_out, + std::vector<uchar3> & decompressed_image_out); + + void free(uint8_t *memory); + + private: + tjhandle m_jpeg_compressor = nullptr; + tjhandle m_jpeg_decompressor = nullptr; +}; + +} // cgr_libcluster + +#endif diff --git a/intern/cycles/cluster_rendering/libcluster/cuda_context_provider.h b/intern/cycles/cluster_rendering/libcluster/cuda_context_provider.h new file mode 100644 index 00000000000..c1e629d496d --- /dev/null +++ b/intern/cycles/cluster_rendering/libcluster/cuda_context_provider.h @@ -0,0 +1,92 @@ +#ifndef __CUDA_CONTEXT_PROVIDER_H__ +#define __CUDA_CONTEXT_PROVIDER_H__ + +#include "./utils/logging.h" + +#ifdef WITH_CUDA_DYNLOAD + #include <cuew.h> + // Do not use CUDA SDK headers when using CUEW + // The macro below is used by Optix SDK and is necessary to avoid DSO loading collision + // See device_optix.cpp for example. +#else + #include <cuda.h> +#endif + +namespace cgr_libcluster { + +// RAII helper that pushes the given CUDA context on construction and pops it on destruction. +// Usage: { CUDAContextScope scope(ctx); /* CUDA calls */ } and the context is popped on scope exit. +// Note that the destructor throws on failure, which terminates the process if unwinding is already in progress. +struct CUDAContextScope { + CUDAContextScope(CUcontext ctx) { + CUresult cu_result = cuCtxPushCurrent(ctx); + if(cu_result != CUDA_SUCCESS) { + const char *error_name = NULL; + cuGetErrorName(cu_result, &error_name); + std::string message = "FATAL. cuCtxPushCurrent failed with error: " + std::string(error_name); + LOG(ERROR) << message; + throw std::runtime_error(message); + } + } + + ~CUDAContextScope() { + CUresult cu_result = cuCtxPopCurrent(NULL); + if(cu_result != CUDA_SUCCESS) { + const char *error_name = NULL; + cuGetErrorName(cu_result, &error_name); + std::string message = "FATAL. 
cuCtxPopCurrent failed with error: " + std::string(error_name); + LOG(ERROR) << message; + throw std::runtime_error(message); + } + } +}; + +class CudaContextProvider { + +public: + + static CUcontext getPrimaryContext(const int device_num) { + // Note: the context is cached after the first successful call; subsequent calls return it and ignore device_num + static CUcontext cuda_context = 0; + if(cuda_context) { + return cuda_context; + } + #ifdef WITH_CUDA_DYNLOAD + LOG(INFO) << "WITH_CUDA_DYNLOAD is on, call cuewInit to load CUDA library"; + // When we run as a part of blender the CUDA lib is normally already loaded by CUDADevice. + // However it's not loaded when this is called from unit tests, so call cuewInit here to be sure + // that the CUDA lib is loaded; it's safe to call cuewInit multiple times + if (cuewInit(CUEW_INIT_CUDA) != CUEW_SUCCESS) { + throw std::runtime_error("Error. CUEW failed to load CUDA lib"); + } + #endif + + CUdevice cuda_device = 0; + CUresult cu_result = cuDeviceGet(&cuda_device, device_num); + // CUDA is normally initialised by CUDADevice, however it's not initialised + // when this is called from unit tests; check if CUDA is initialised and run cuInit if not. + if(cu_result == CUDA_ERROR_NOT_INITIALIZED) { + LOG(WARNING) << "CUDA is not initialised, running cuInit"; + cu_result = cuInit(0); + if (cu_result != CUDA_SUCCESS) { + std::string message = "Error. Cannot initialise CUDA, cuInit failed " + "with error code: " + std::to_string(cu_result); + throw std::runtime_error(message); + } + cu_result = cuDeviceGet(&cuda_device, device_num); + } + if(cu_result != CUDA_SUCCESS) { + std::string message = "Error. Cannot get primary cuda context because cuDeviceGet failed " + "with error code: " + std::to_string(cu_result); + throw std::runtime_error(message); + } + cu_result = cuDevicePrimaryCtxRetain(&cuda_context, cuda_device); + if(cu_result != CUDA_SUCCESS) { + std::string message = "Error. Failed to retain the primary cuda context " + "with error code: " + std::to_string(cu_result); + throw std::runtime_error(message); + } + return cuda_context; + } +}; + +} // namespace cgr_libcluster + +#endif diff --git a/intern/cycles/cluster_rendering/libcluster/denoising/denoising_context.cpp b/intern/cycles/cluster_rendering/libcluster/denoising/denoising_context.cpp index 19ac3856cc6..6bef4ee414b 100644 --- a/intern/cycles/cluster_rendering/libcluster/denoising/denoising_context.cpp +++ b/intern/cycles/cluster_rendering/libcluster/denoising/denoising_context.cpp @@ -1,3 +1,6 @@ +#ifdef WITH_CUDA + #include "../cuda_context_provider.h" +#endif #include "denoising_context.h" #include "master_oidn_denoiser.h" #ifdef WITH_OPTIX @@ -26,7 +29,8 @@ MasterDenoiser * DenoisersProvider::makeOptixDenoiser(bool save_denoise_io, int const std::string & output_folder_path, bool is_denoising_passes_on, const ImageOutputProvider & image_output_provider, int max_img_width, int max_img_height) const { try { - return new MasterOptixDenoiser(save_denoise_io, save_every_n_images, output_folder_path, + CUcontext cuda_context = CudaContextProvider::getPrimaryContext(MasterOptixDenoiser::DEVICE_NUM); + return new MasterOptixDenoiser(cuda_context, save_denoise_io, save_every_n_images, output_folder_path, is_denoising_passes_on, image_output_provider, max_img_width, max_img_height); } catch(std::runtime_error & ex) { LOG(ERROR) << "DenoisersProvider::makeDenoiser. ERROR. 
Failed to create an instance of MasterOptixDenoiser due to: " diff --git a/intern/cycles/cluster_rendering/libcluster/denoising/master_optix_denoiser.cpp b/intern/cycles/cluster_rendering/libcluster/denoising/master_optix_denoiser.cpp index b704370eab8..a61d93d05b5 100644 --- a/intern/cycles/cluster_rendering/libcluster/denoising/master_optix_denoiser.cpp +++ b/intern/cycles/cluster_rendering/libcluster/denoising/master_optix_denoiser.cpp @@ -1,3 +1,6 @@ + +#include "../cuda_context_provider.h" +#include "../utils/cuda_utils.h" #include "../utils/timer.h" // for scoped_timer #include "../utils/logging.h" #include "../server_image.h" // for ServerImage; @@ -7,28 +10,24 @@ #include #include + namespace cgr_libcluster { -struct CUDAContextScope { - CUDAContextScope(CUcontext ctx) { - cuCtxPushCurrent(ctx); - } - - ~CUDAContextScope() { - cuCtxPopCurrent(NULL); - } -}; - // Constructor throws exception if it cannot create an instance. // This prevents creation of the object that is not functional. // Doesn't affect rendering performance as happens once at init stage -MasterOptixDenoiser::MasterOptixDenoiser(bool save_denoise_io, int save_every_n_images, const std::string & output_folder_path, - bool is_denoising_passes_on, const ImageOutputProvider & image_output_provider, int max_img_width, int max_img_height) : +MasterOptixDenoiser::MasterOptixDenoiser(CUcontext cuda_context, bool save_denoise_io, int save_every_n_images, + const std::string & output_folder_path, bool is_denoising_passes_on, + const ImageOutputProvider & image_output_provider, int max_img_width, int max_img_height) : MasterDenoiser( ClusterSessionParams::MasterDenoiser::MASTER_DENOISER_OPTIX, save_denoise_io, save_every_n_images, output_folder_path, is_denoising_passes_on, image_output_provider), - max_img_width(max_img_width), max_img_height(max_img_height) { - LOG(INFO) << "Creating MasterOptixDenoiser"; + max_img_width(max_img_width), max_img_height(max_img_height), + cuda_context(cuda_context) { + LOG(INFO) << "Creating MasterOptixDenoiser with width: " << max_img_width << " height: " << max_img_height << + " save_io: " << save_denoise_io << " save_every_n_images: " << save_every_n_images << " output_folder_path: " << + output_folder_path << " is_denoising_passes_on: " << is_denoising_passes_on; + OptixDeviceContext optix_context = nullptr; OptixDenoiserOptions denoiser_options; if(is_denoising_passes_on) { @@ -39,166 +38,69 @@ MasterOptixDenoiser::MasterOptixDenoiser(bool save_denoise_io, int save_every_n_ denoiser_options.guideNormal = 0; } //denoiser_options.pixelFormat = OPTIX_PIXEL_FORMAT_FLOAT3; - CUdevice cuda_device = 0; + OptixDeviceContextOptions options = {}; - -#ifdef WITH_CUDA_DYNLOAD - LOG(INFO) << "WITH_CUDA_DYNLOAD is on, call cuewInit to load CUDA library"; - // When we run as a part of the blender the CUDA lib is normally already loaded by CUDADevice - // However it's not loaded when is called from unit tests so call cuewInit here to be sure - // that CUDA lib is loaded, it's safe to call cuewInit multiple times - if (cuewInit(CUEW_INIT_CUDA) != CUEW_SUCCESS) { - throw std::runtime_error("Error. CUEW failed to load CUDA lib"); - } -#endif - - CUresult cu_result = cuDeviceGet(&cuda_device, DEVICE_NUM); - // CUDA is normally initialised by CUDADevice however it's not initialised - // when is called from unit tests, check if CUDA is initialised, run cuInit if not. 
- if(cu_result == CUDA_ERROR_NOT_INITIALIZED) { - LOG(WARNING) << "Cuda is not initialised, run cuInit"; - cu_result = cuInit(0); - if (cu_result != CUDA_SUCCESS) { - std::string message = "Error. Cannot initialise CUDA, cuInit failed " - "with error code: " + std::to_string(cu_result); - throw std::runtime_error(message); - } - cu_result = cuDeviceGet(&cuda_device, DEVICE_NUM); - } - - if(cu_result != CUDA_SUCCESS) { - std::string message = "Error. Cannot create MasterOptixDenoiser due to cuDeviceGet failed " - "with error code: " + std::to_string(cu_result); - throw std::runtime_error(message); - } - - unsigned int ctx_flags = CU_CTX_LMEM_RESIZE_TO_MAX; - cu_result = cuCtxCreate(&cuda_context, ctx_flags, cuda_device); - if(cu_result != CUDA_SUCCESS) { - std::string message = "Error. Cannot create MasterOptixDenoiser due to cuCtxCreate failed " - "with error code: " + std::to_string(cu_result); - throw std::runtime_error(message); - } const CUDAContextScope scope(cuda_context); if (g_optixFunctionTable.optixDeviceContextCreate != NULL) { LOG(INFO) << "Optix function table is already initialized, continue."; } else { LOG(INFO) << "Initializing Optix..."; - OptixResult optix_result = optixInit(); - if(optix_result == OPTIX_ERROR_UNSUPPORTED_ABI_VERSION) { - std::string message = "Error. OptiX initialization failed because the installed driver does not support " - "ABI version: " + std::to_string(OPTIX_ABI_VERSION); - throw std::runtime_error(message); - } - else if(optix_result != OPTIX_SUCCESS) { - std::string message = "Error. OptiX initialization failed with error code: " + std::to_string(optix_result); - throw std::runtime_error(message); - } + OPTIX_API_CALL(optixInit(), THROW_IF_ERROR); LOG(INFO) << "Initialization of Optix function table complete"; } - - OptixResult optix_result = optixDeviceContextCreate(cuda_context, &options, &optix_context); - if(optix_result != OPTIX_SUCCESS) { - std::string message = "Error. Cannot create MasterOptixDenoiser due to optixDeviceContextCreate failed " - "with error code: " + std::to_string(optix_result); - throw std::runtime_error(message); - } + OPTIX_API_CALL(optixDeviceContextCreate(cuda_context, &options, &optix_context), THROW_IF_ERROR); //TODO: figure out how to support KIND_TEMPORAL, needs vecocity vector - optix_result = optixDenoiserCreate(optix_context, OPTIX_DENOISER_MODEL_KIND_HDR, - &denoiser_options, &denoiser); - if(optix_result != OPTIX_SUCCESS) { - std::string message = "Error. Cannot create MasterOptixDenoiser due to optixDeviceContextCreate failed " - "with error code: " + std::to_string(optix_result); - throw std::runtime_error(message); - } + OPTIX_API_CALL(optixDenoiserCreate(optix_context, OPTIX_DENOISER_MODEL_KIND_HDR, + &denoiser_options, &denoiser), THROW_IF_ERROR); if(denoiser == nullptr) { throw std::runtime_error("Error. Cannot create MasterOptixDenoiser due to optixDenoiserCreate returned no denoiser"); } - /* - optix_result = optixDenoiserSetModel(denoiser, OPTIX_DENOISER_MODEL_KIND_HDR, nullptr, 0); - if(optix_result != OPTIX_SUCCESS) { - std::string message = "Error. 
Cannot create MasterOptixDenoiser due to optixDenoiserSetModel failed " - "with error code: " + std::to_string(optix_result); - throw std::runtime_error(message); - } - */ + // OPTIX_API_CALL(optixDenoiserSetModel(denoiser, OPTIX_DENOISER_MODEL_KIND_HDR, nullptr, 0), THROW_IF_ERROR); + memset(&denoiser_sizes, 0, sizeof(OptixDenoiserSizes)); - optix_result = optixDenoiserComputeMemoryResources(denoiser, max_img_width, max_img_height, &denoiser_sizes); - if(optix_result != OPTIX_SUCCESS) { - std::string message = "Error. Cannot create MasterOptixDenoiser due to optixDenoiserComputeMemoryResources failed " - "with error code: " + std::to_string(optix_result); - throw std::runtime_error(message); - } + OPTIX_API_CALL(optixDenoiserComputeMemoryResources(denoiser, max_img_width, max_img_height, &denoiser_sizes), + THROW_IF_ERROR); - cu_result = cuMemAlloc(&state_denoiser, denoiser_sizes.stateSizeInBytes); - if(cu_result != CUDA_SUCCESS) { - std::string message = "Error. Cannot create MasterOptixDenoiser due to cuMemAlloc for state_denoiser failed " - "with error code: " + std::to_string(cu_result); - throw std::runtime_error(message); - } - - cu_result = cuMemAlloc(&scratch_denoiser, denoiser_sizes.withoutOverlapScratchSizeInBytes); - if(cu_result != CUDA_SUCCESS) { - std::string message = "Error. Cannot create MasterOptixDenoiser due to cuMemAlloc for scratch_denoiser failed " - "with error code: " + std::to_string(cu_result); - throw std::runtime_error(message); - } - optix_result = optixDenoiserSetup( + CUDA_API_CALL(cuMemAlloc(&state_denoiser, denoiser_sizes.stateSizeInBytes), THROW_IF_ERROR); + CUDA_API_CALL(cuMemAlloc(&scratch_denoiser, denoiser_sizes.withoutOverlapScratchSizeInBytes), THROW_IF_ERROR); + CUDA_API_CALL(cuStreamCreate(&cuda_stream, CU_STREAM_DEFAULT), THROW_IF_ERROR); + OPTIX_API_CALL(optixDenoiserSetup( denoiser, - 0, // cuda stream, + cuda_stream, max_img_width, max_img_height, state_denoiser, denoiser_sizes.stateSizeInBytes, scratch_denoiser, - denoiser_sizes.withoutOverlapScratchSizeInBytes); - if(optix_result != OPTIX_SUCCESS) { - std::string message = "Error. Cannot create MasterOptixDenoiser due to optixDenoiserSetup failed " - "with error code: " + std::to_string(optix_result); - throw std::runtime_error(message); - } + denoiser_sizes.withoutOverlapScratchSizeInBytes), THROW_IF_ERROR); allocateCudaBuffer(&cuda_pixels_buffer, getMaxBufferSize(), "input/noisy pixels buffer"); + if(is_denoising_passes_on) { allocateCudaBuffer(&cuda_albedo_buffer, getMaxBufferSize(), "input albedo buffer"); allocateCudaBuffer(&cuda_normal_buffer, getMaxBufferSize(), "input normal buffer"); } allocateCudaBuffer(&cuda_denoised_pixels_buffer, getMaxBufferSize(), "denoised pixels buffer"); + LOG(INFO) << "MasterOptixDenoiser creation is complete"; } MasterOptixDenoiser::~MasterOptixDenoiser() { LOG(INFO) << "Destroing MasterOptixDenoiser"; - CUresult cu_result = cuMemFree(scratch_denoiser); - if(cu_result != CUDA_SUCCESS) { - LOG(ERROR) << "Error. cuMemFree failed for scratch_denoiser with error code: " << std::to_string(cu_result); - } - cu_result = cuMemFree(state_denoiser); - if(cu_result != CUDA_SUCCESS) { - LOG(ERROR) << "Error. cuMemFree failed for state_denoiser with error code: " << std::to_string(cu_result); - } - OptixResult optix_result = optixDenoiserDestroy(denoiser); - if(optix_result != OPTIX_SUCCESS) { - LOG(ERROR) << "Error. 
optixDenoiserDestroy failed with error code: " << optix_result; - } + const CUDAContextScope scope(cuda_context); + CUDA_API_CALL(cuMemFree(scratch_denoiser), DO_NOT_THROW); + CUDA_API_CALL(cuMemFree(state_denoiser), DO_NOT_THROW); + CUDA_API_CALL(cuStreamDestroy(cuda_stream), DO_NOT_THROW); + OPTIX_API_CALL(optixDenoiserDestroy(denoiser), DO_NOT_THROW); releaseCudaBuffers(); - cu_result = cuCtxDestroy(cuda_context); - if(cu_result != CUDA_SUCCESS) { - LOG(ERROR) << "Error. cuCtxDestroy failed with error code: " << std::to_string(cu_result); - } LOG(INFO) << "Destroyed MasterOptixDenoiser"; } void MasterOptixDenoiser::allocateCudaBuffer(CUdeviceptr * buffer_ptr, size_t buffer_size, const std::string & buffer_name) { LOG(INFO) << "Allocating cuda memory for " << buffer_name; - CUresult cu_result = cuMemAlloc(buffer_ptr, buffer_size); - if(cu_result != CUDA_SUCCESS) { - std::string message = "Error. Could not allocate memory for " + buffer_name + " on device. Cuda error code: " - + std::to_string(cu_result); - throw std::runtime_error(message); - } + CUDA_API_CALL(cuMemAlloc(buffer_ptr, buffer_size), THROW_IF_ERROR); } size_t MasterOptixDenoiser::getMaxBufferSize() { @@ -206,32 +108,17 @@ size_t MasterOptixDenoiser::getMaxBufferSize() { } void MasterOptixDenoiser::releaseCudaBuffers() { - CUresult cu_result = cuMemFree(cuda_pixels_buffer); - if(cu_result != CUDA_SUCCESS) { - "Error. cuMemFree failed for cuda_pixels_buffer with error code:" + std::to_string(cu_result); - } + CUDA_API_CALL(cuMemFree(cuda_pixels_buffer), DO_NOT_THROW); cuda_pixels_buffer = 0; - if(cuda_albedo_buffer) { - cu_result = cuMemFree(cuda_albedo_buffer); - if(cu_result != CUDA_SUCCESS) { - "Error. cuMemFree failed for cuda_albedo_buffer with error code:" + std::to_string(cu_result); - } + CUDA_API_CALL(cuMemFree(cuda_albedo_buffer), DO_NOT_THROW); cuda_albedo_buffer = 0; } - if(cuda_normal_buffer) { - cu_result = cuMemFree(cuda_normal_buffer); - if(cu_result != CUDA_SUCCESS) { - "Error. cuMemFree failed for cuda_normal_buffer with error code:" + std::to_string(cu_result); - } + CUDA_API_CALL(cuMemFree(cuda_normal_buffer), DO_NOT_THROW); cuda_normal_buffer = 0; } - - cu_result = cuMemFree(cuda_denoised_pixels_buffer); - if(cu_result != CUDA_SUCCESS) { - "Error. cuMemFree failed for cuda_denoised_pixels_buffer with error code:" + std::to_string(cu_result); - } + CUDA_API_CALL(cuMemFree(cuda_denoised_pixels_buffer), DO_NOT_THROW); cuda_denoised_pixels_buffer = 0; } @@ -252,11 +139,7 @@ bool MasterOptixDenoiser::initInputOptixImage(OptixImage2D &input_image, CUdevic const int row_stride = server_image.getWidth() * pixel_stride; const size_t buffer_size_bytes = sizeof(cgr_libcluster::float3) * server_image.getWidth() * server_image.getHeight(); - CUresult cu_result = cuMemcpyHtoD(input_buffer_device, input_buffer_host, buffer_size_bytes); - if(cu_result != CUDA_SUCCESS) { - LOG(ERROR) << "Error. Could not copy input buffer from host to device memory. 
CUDA error code: " << cu_result; - return false; - } + CUDA_API_CALL(cuMemcpyHtoD(input_buffer_device, input_buffer_host, buffer_size_bytes), THROW_IF_ERROR); input_image.data = input_buffer_device; input_image.width = server_image.getWidth(); input_image.height = server_image.getHeight(); @@ -267,6 +150,7 @@ bool MasterOptixDenoiser::initInputOptixImage(OptixImage2D &input_image, CUdevic } DenoisingResult MasterOptixDenoiser::denoise(ServerImage &server_image) { + const CUDAContextScope scope(cuda_context); if(save_denoise_io) { if (save_every_n_images <= 0) { LOG(ERROR) << "Error: invalid save_every_n_images: " << save_every_n_images; @@ -314,7 +198,7 @@ DenoisingResult MasterOptixDenoiser::denoise(ServerImage &server_image) { OptixResult optix_result = optixDenoiserInvoke( denoiser, - 0, // cuda stream + cuda_stream, ¶ms_denoiser, state_denoiser, denoiser_sizes.stateSizeInBytes, @@ -329,24 +213,28 @@ DenoisingResult MasterOptixDenoiser::denoise(ServerImage &server_image) { LOG(ERROR) << "Error. Optix denoise failed. OPTIX error code: " << optix_result; return DenoisingResult::FAILED; } - - const size_t buffer_size_bytes = sizeof(cgr_libcluster::float3) * server_image.getWidth() * server_image.getHeight(); - CUresult cu_result = cuMemcpyDtoH(server_image.denoised_pixels, cuda_denoised_pixels_buffer, buffer_size_bytes); - if(cu_result != CUDA_SUCCESS) { - LOG(ERROR) << "Error. Could not copy image buffer from device to host memory. CUDA error code: " << cu_result; - return DenoisingResult::FAILED; - } - - server_image.denoised = true; + // explicitly wait till denoising is complete + cuStreamSynchronize(cuda_stream); + denoised_buffer_size_bytes = sizeof(cgr_libcluster::float3) * server_image.getWidth() * server_image.getHeight(); LOG(INFO) << "denoising time for frame: " << server_image.camera.frame << " " << denoizer_timer.elapsed(); - if(save_denoise_io) { - if (save_every_n_images <= 0) { - LOG(ERROR) << "Error: invalid save_every_n_images: " << save_every_n_images; - } else if (server_image.getFrame() % save_every_n_images == 0) { - saveDenoisedImage(server_image); - } - } - return server_image.denoised ? DenoisingResult::OK : DenoisingResult::FAILED; + return DenoisingResult::OK; +} + +CUdeviceptr MasterOptixDenoiser::getCudaMemPointerToDenoisedImage() const { + return cuda_denoised_pixels_buffer; +} + +CUstream MasterOptixDenoiser::getCudaStream() const { + return cuda_stream; +} + +void MasterOptixDenoiser::copyDenoisedImageFromCudaToHostMemory(uint8_t * dest_host_memory) { + scoped_timer copy_device_2_host_timer; + const CUDAContextScope scope(cuda_context); + CUDA_API_CALL(cuMemcpyDtoHAsync(dest_host_memory, cuda_denoised_pixels_buffer, + denoised_buffer_size_bytes, cuda_stream), THROW_IF_ERROR); + CUDA_API_CALL(cuStreamSynchronize(cuda_stream), THROW_IF_ERROR); + LOG(INFO) << "time to copy denoised image from device to host: " << copy_device_2_host_timer.elapsed(); } } // cgr_libcluster diff --git a/intern/cycles/cluster_rendering/libcluster/denoising/master_optix_denoiser.h b/intern/cycles/cluster_rendering/libcluster/denoising/master_optix_denoiser.h index 6cb3cc3abec..7d9756cf704 100644 --- a/intern/cycles/cluster_rendering/libcluster/denoising/master_optix_denoiser.h +++ b/intern/cycles/cluster_rendering/libcluster/denoising/master_optix_denoiser.h @@ -8,6 +8,8 @@ // The macro below is used by Optix SDK and is necessary to avoid DSO loading collision // See device_optix.cpp for example. 
#define OPTIX_DONT_INCLUDE_CUDA +#else + #include <cuda.h> +#endif #include @@ -32,21 +34,27 @@ public: static const size_t DEFAULT_IMAGE_WIDTH_PIXELS = 1024; static const size_t DEFAULT_IMAGE_HEIGHT_PIXELS = 1024; - MasterOptixDenoiser(bool save_denoise_io, int save_every_n_images, const std::string & output_folder_path, - bool is_denoising_passes_on, const ImageOutputProvider & image_output_provider, int max_img_width, int max_img_height); + MasterOptixDenoiser(CUcontext cuda_context, bool save_denoise_io, int save_every_n_images, + const std::string & output_folder_path, bool is_denoising_passes_on, + const ImageOutputProvider & image_output_provider, int max_img_width, int max_img_height); virtual ~MasterOptixDenoiser(); virtual DenoisingResult denoise(ServerImage &server_image) override; MasterOptixDenoiser(MasterOptixDenoiser const&) = delete; - void operator=(MasterOptixDenoiser const&) = delete; + void operator=(MasterOptixDenoiser const&) = delete; + CUstream getCudaStream() const; + CUdeviceptr getCudaMemPointerToDenoisedImage() const; + + void copyDenoisedImageFromCudaToHostMemory(uint8_t * dest_host_memory); -private: // For denoising we always use device # 0 // rendering can be assigned to devices starting from #1 // so denoising and rendering do not run on the same device and do not fight for GPU resources static const int DEVICE_NUM = 0; +private: + void allocateCudaBuffer(CUdeviceptr * buffer_ptr, size_t buffer_size, const std::string & buffer_name); void releaseCudaBuffers(); size_t getMaxBufferSize(); @@ -54,6 +62,18 @@ private: ServerImage &server_image, ImagePixel * input_buffer_host); void initOutputOptixImage(OptixImage2D &output_image, ServerImage &server_image); + // We set the cuda stream explicitly so downstream components which use the denoised image, + // like pixel conversion and image compression, can run within the same cuda stream + // and therefore be implicitly synchronized for the best possible performance. + // For now we synchronize explicitly by calling cuStreamSynchronize() in denoise() + // so the denoise time logging outputs the correct denoising duration. Denoising time calculation + // and logging need to be updated upon switching to implicit synchronization by the CUDA runtime. + // Here is a task for that improvement: T143205954. + // Currently the cuda stream is owned by this class for simplicity. Alternatively the stream + // could be passed in from outside, but this requires more changes in ServerImageBuffer and DenoisingContext. + // Not making this change at this point to limit the scope to the nvdecoder integration; + // this change can be done as part of the task for synchronization improvement mentioned above. 
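+ // Hypothetical usage sketch (the kernel and buffer names below are illustrative only): + //   CUstream stream = denoiser.getCudaStream(); + //   launch_pixel_conversion(denoiser.getCudaMemPointerToDenoisedImage(), stream); // enqueued after denoise + //   cuMemcpyDtoHAsync(host_out, converted_ptr, size_bytes, stream); // ordered by the same stream + //   cuStreamSynchronize(stream); 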
+ CUstream cuda_stream = nullptr; CUcontext cuda_context = nullptr; OptixDenoiser denoiser = nullptr; OptixDenoiserSizes denoiser_sizes; @@ -64,7 +84,11 @@ private: CUdeviceptr cuda_pixels_buffer = 0; CUdeviceptr cuda_albedo_buffer = 0; CUdeviceptr cuda_normal_buffer = 0; + + CUdevice m_cuda_device; + CUdeviceptr cuda_denoised_pixels_buffer = 0; + size_t denoised_buffer_size_bytes = 0; int max_img_width = DEFAULT_IMAGE_WIDTH_PIXELS; int max_img_height = DEFAULT_IMAGE_HEIGHT_PIXELS; }; diff --git a/intern/cycles/cluster_rendering/libcluster/net_camera.cpp b/intern/cycles/cluster_rendering/libcluster/net_camera.cpp index ae12269de70..93d318d7f8f 100644 --- a/intern/cycles/cluster_rendering/libcluster/net_camera.cpp +++ b/intern/cycles/cluster_rendering/libcluster/net_camera.cpp @@ -90,6 +90,7 @@ bool NetCamera::operator==(const NetCamera &other_camera) const this->compression_quality == other_camera.compression_quality && this->master_denoiser == other_camera.master_denoiser && this->master_image_color_format == other_camera.master_image_color_format && + this->master_image_compressor == other_camera.master_image_compressor && this->worker_map == other_camera.worker_map; } diff --git a/intern/cycles/cluster_rendering/libcluster/net_camera.fbs b/intern/cycles/cluster_rendering/libcluster/net_camera.fbs index 9e07cbd07b6..72c8c9b7909 100644 --- a/intern/cycles/cluster_rendering/libcluster/net_camera.fbs +++ b/intern/cycles/cluster_rendering/libcluster/net_camera.fbs @@ -3,6 +3,7 @@ namespace cgr_libcluster; enum CameraTypeFlatBuffer:byte { CAMERA_PERSPECTIVE = 0, CAMERA_ORTHOGRAPHIC, CAMERA_PANORAMA } enum MasterDenoiserFlatBuffer:byte { MASTER_DENOISER_NONE = 0, MASTER_DENOISER_OIDN, MASTER_DENOISER_OPTIX, MASTER_DENOISER_BARCELONA } enum MasterImageColorFormatFlatBuffer:byte { MASTER_IMAGE_COLOR_FORMAT_LINEAR = 1, MASTER_IMAGE_COLOR_FORMAT_SRGB = 2} +enum MasterImageCompressorFlatBuffer:byte { MASTER_IMAGE_COMPRESSOR_JPEG = 1, MASTER_IMAGE_COMPRESSOR_NVENCODER = 2} table NetCameraFlatBuffer { cam_matrix:TransformFlatBuffer; @@ -28,6 +29,7 @@ table NetCameraFlatBuffer { worker_map:WorkerMapFlatBuffer; scene_frame:int; master_image_color_format:MasterImageColorFormatFlatBuffer = MASTER_IMAGE_COLOR_FORMAT_LINEAR; + master_image_compressor:MasterImageCompressorFlatBuffer = MASTER_IMAGE_COMPRESSOR_JPEG; } table TransformFlatBuffer { diff --git a/intern/cycles/cluster_rendering/libcluster/net_camera.h b/intern/cycles/cluster_rendering/libcluster/net_camera.h index cb556fbd7b4..e0d128755bd 100644 --- a/intern/cycles/cluster_rendering/libcluster/net_camera.h +++ b/intern/cycles/cluster_rendering/libcluster/net_camera.h @@ -90,6 +90,8 @@ public: bool expect_modify_object_message = false; ClusterSessionParams::MasterImageColorFormat master_image_color_format = ClusterSessionParams::DEFAULT_MASTER_IMAGE_COLOR_FORMAT; + ClusterSessionParams::MasterImageCompressor master_image_compressor = + ClusterSessionParams::DEFAULT_MASTER_IMAGE_COMPRESSOR; }; diff --git a/intern/cycles/cluster_rendering/libcluster/net_server.cpp b/intern/cycles/cluster_rendering/libcluster/net_server.cpp index 543a456fe17..11f470af747 100644 --- a/intern/cycles/cluster_rendering/libcluster/net_server.cpp +++ b/intern/cycles/cluster_rendering/libcluster/net_server.cpp @@ -8,9 +8,19 @@ #include "denoising/denoising_context.h" #include "streamed_image.h" +// Not sure that this is the best place to set OS_LINUX; +// IMO it should be set somewhere at a higher level. +// Setting it here for now to get the system built. 
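+// (One higher-level option, sketched here as an assumption: the platform check could live in CMake, +// e.g. add_definitions(-DOS_LINUX) for Linux targets, instead of being derived from compiler macros.) 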
+// TODO: revisit this flag later +#if defined(linux) || defined(__linux) || defined(__linux__) +# ifndef OS_LINUX +# define OS_LINUX +# endif +#endif + namespace cgr_libcluster { -NetServer::~NetServer() +NetServer::~NetServer() { } @@ -41,7 +51,7 @@ void NetServer::stop() theClient->reset(); //wait for main client to connect - bool newClient = false; + bool newClient = false; while (!newClient) { acceptConnection(masterPort); //tell main client to start @@ -54,8 +64,8 @@ void NetServer::stop() netThread = new std::thread(std::bind(&NetServer::run, this)); } } - tbb::mutex::scoped_lock imageStoplock(serverImage.serverImageStopMutex); - serverImage.reset(); + tbb::mutex::scoped_lock imageStoplock(server_image_buffer.serverImageStopMutex); + server_image_buffer.reset(); } void NetServer::createStreamedImage(unsigned int readIndex, ServerImage ¤tImage, DenoisingContext & denoising_context) @@ -65,8 +75,8 @@ void NetServer::createStreamedImage(unsigned int readIndex, ServerImage ¤t #ifndef WORKER_IMAGE_STREAMING_TEST mergeWithWorkers = theClient->mergeWith(currentImage); #endif - serverImage.createStreamedImage(readIndex, mergeWithWorkers, denoising_context); - } + server_image_buffer.createStreamedImage(readIndex, mergeWithWorkers, denoising_context); + } } bool NetServer::isMaster(){ @@ -87,6 +97,7 @@ void NetServer::set_save_every_n_images(int save_every_n_images) { void NetServer::set_output_folder_path(const std::string & output_folder_path) { this->output_folder_path = output_folder_path; + server_image_buffer.set_output_folder_path(output_folder_path); } void NetServer::run() @@ -100,13 +111,15 @@ void NetServer::run() for (;;) { if (stopped) break; - tbb::mutex::scoped_lock imageStopLock(serverImage.serverImageStopMutex); - if (!serverImage.isEmpty()) { - int readIndex = serverImage.readIndex; - LOG(INFO) << "server image indices: " << serverImage.writeIndex << " " << serverImage.readIndex; - ServerImage& currentImage = serverImage.readImage(); + tbb::mutex::scoped_lock imageStopLock(server_image_buffer.serverImageStopMutex); + if (!server_image_buffer.isEmpty()) { + int readIndex = server_image_buffer.readIndex; + LOG(INFO) << "server image indices: " << server_image_buffer.writeIndex << " " << server_image_buffer.readIndex; + ServerImage& currentImage = server_image_buffer.readImage(); //if has client, combine with correct client image before sending - if (isMaster()) createStreamedImage(readIndex, currentImage, denoising_context); + if (isMaster()) { + createStreamedImage(readIndex, currentImage, denoising_context); + } RPCSend streamSnd(image_socket, &error_func, SERVER_STREAM_IMAGE_CMD); if (error_func.have_error()) { @@ -114,7 +127,7 @@ void NetServer::run() } if (isMaster()) { // Create a StreamedImage - StreamedImage &streamedImage = serverImage.streamedImageBuffer[readIndex]; + StreamedImage &streamedImage = server_image_buffer.streamedImageBuffer[readIndex]; send_streamed_image(streamSnd, streamedImage); if (save_streamed_image) { if (save_every_n_images <= 0) { @@ -135,12 +148,16 @@ void NetServer::run() time_sleep(NET_IMAGE_PAUSE); } } //endfor +#if defined(OS_LINUX) && defined(WITH_CUDA) + server_image_buffer.deleteNvEncoder(); +#endif + VLOG(3) << "Exit NetServer run loop"; } NetServer::NetServer( const char *masterAddr, unsigned short masterPort) - : NetBase(SERVER_PORT, masterAddr), - theClient(NULL), serverImage(IMAGE_FRAME_COUNT, true, true), + : NetBase(SERVER_PORT, masterAddr), + theClient(NULL), server_image_buffer(IMAGE_FRAME_COUNT, true, true), 
masterPort(masterPort), net_camera_command(net_camera, modify_object_message) { // acceptConnection(); @@ -159,7 +176,7 @@ ClusterRenderCommand& NetServer::wait_for_client_command() { rcv.read_buffer(&cam, sizeof(cam)); LOG(INFO) << "server receive camera for frame: " << cam.frame << " cam_width: " << cam.cam_width << " cam_height: " << cam.cam_height ; numCameraReceived++; - serverImage.set_master_image_color_format(cam.master_image_color_format); + server_image_buffer.set_master_image_color_format(cam.master_image_color_format); if(cam.expect_modify_object_message) { rcv.read_buffer(&modify_object_message, sizeof(modify_object_message)); if (theClient) { @@ -171,16 +188,16 @@ ClusterRenderCommand& NetServer::wait_for_client_command() { //master prepare to receive image from workers //FENGTOFIX: this is going to cause a prob //if client camera for frame n is received - //before worker images for frame n-1 have been + //before worker images for frame n-1 have been //processed!!!! - //make workers do 2xworker_count work + //make workers do 2xworker_count work NetCamera childCam(cam); childCam.integrator_seed = cam.integrator_seed+5; childCam.sampleCount = cam.sampleCount * theClient->getChildrenCount(); theClient->send_camera(childCam); } - serverImage.begin_frame(cam); + server_image_buffer.begin_frame(cam); return net_camera_command; } else if (rcv.name == KILL_CMD) { LOG(INFO) << "received terminate message...exiting"; @@ -199,8 +216,8 @@ bool NetServer::send_tile(const NetRenderTile& rtile) //because rtile has no imformation about NetCamera //we have to insert NetCamera associated with image //before first tile insertion - - ServerImage* activeImage = serverImage.getActiveImage(); + + ServerImage* activeImage = server_image_buffer.getActiveImage(); assert(activeImage); if (activeImage) { //no locking because tile insertion are for different elements in the buffer @@ -208,11 +225,11 @@ bool NetServer::send_tile(const NetRenderTile& rtile) if (activeImage->tile_count == getMaxTileCount()) { activeImage->imageCount = 1; activeImage->sampleCount = activeImage->camera.sampleCount; - serverImage.endInsertImage(); + server_image_buffer.endInsertImage(); } std::cout << "Insert tile progress: "<< activeImage->tile_count << " "<< getMaxTileCount() << "\n"; return true; - } + } return false; } @@ -230,7 +247,7 @@ void NetServer::serverConnectionsThread(NetClient *theClient, unsigned short mas delete slave; maxSlaves--; } - } + } theClient->slaveCount = slaveCount; } diff --git a/intern/cycles/cluster_rendering/libcluster/net_server.h b/intern/cycles/cluster_rendering/libcluster/net_server.h index 886db0341ef..86631dbe0f2 100644 --- a/intern/cycles/cluster_rendering/libcluster/net_server.h +++ b/intern/cycles/cluster_rendering/libcluster/net_server.h @@ -22,7 +22,7 @@ class NetServer: public NetBase { NetClient *theClient; //client of worker servers void createStreamedImage(unsigned int index, ServerImage& curImage, DenoisingContext & denoising_context); - ServerImageBuffer serverImage; + ServerImageBuffer server_image_buffer; unsigned short masterPort; diff --git a/intern/cycles/cluster_rendering/libcluster/serializer.h b/intern/cycles/cluster_rendering/libcluster/serializer.h index ef63ab8b1d9..9f279eb5661 100644 --- a/intern/cycles/cluster_rendering/libcluster/serializer.h +++ b/intern/cycles/cluster_rendering/libcluster/serializer.h @@ -10,9 +10,9 @@ namespace cgr_libcluster { //using namespace ClusterRenderer; class NetCamera; - class Float4FlatBuffer; - class TransformFlatBuffer; - + struct 
Float4FlatBuffer;
+struct TransformFlatBuffer;
+
 class Serializer {
 public:
   class Buffer {
@@ -23,16 +23,14 @@ public:
     size_t size;
   };
 
-  // Pass in FlatBuffer otherwise the memory won't exist when the method is exited
-
+  // Pass in FlatBufferBuilder, otherwise the memory won't exist when the method is exited
   static Buffer serialize(flatbuffers::FlatBufferBuilder &builder, const NetCamera & net_camera);
   static void deserialize(uint8_t *buffer_pointer, NetCamera &net_camera_out);
 
 private:
   static void bufferToFloat4(const Float4FlatBuffer * float4_flatbuffer, cgr_libcluster::float4 & float4_out);
-
   static void bufferToTransform(const TransformFlatBuffer * cam_matrix_flatbuffer, cgr_libcluster::Transform & cam_matrix_out);
-};
+};
 
 } // cgr_libcluster
diff --git a/intern/cycles/cluster_rendering/libcluster/server_image.cpp b/intern/cycles/cluster_rendering/libcluster/server_image.cpp
index c648669aa82..01dd15edf9e 100644
--- a/intern/cycles/cluster_rendering/libcluster/server_image.cpp
+++ b/intern/cycles/cluster_rendering/libcluster/server_image.cpp
@@ -125,6 +125,8 @@ size_t ServerImage::read(RPCReceive &rcv)
   NetCamera net_camera;
   rcv.read_buffer(&net_camera, sizeof(NetCamera));
   size_t buf_size = net_camera.cam_width * net_camera.cam_height * sizeof (ImagePixel);
+  // The log line below is used by get_metrics.py to calculate stats. If you change it,
+  // please make sure get_metrics.py still works correctly. Update it if needed.
   LOG(INFO) << "net camera received for frame: " << net_camera.frame << " " << net_camera.cam_width << " " << net_camera.cam_height;
   rcv.read_buffer(pixels, buf_size);
   LOG(INFO) << "read image for frame: " << net_camera.frame << " with sample: " << net_camera.sampleCount << " of size: " << buf_size;
diff --git a/intern/cycles/cluster_rendering/libcluster/server_image_buffer.cpp b/intern/cycles/cluster_rendering/libcluster/server_image_buffer.cpp
index 9e0561e5a64..e7a6bffbe42 100644
--- a/intern/cycles/cluster_rendering/libcluster/server_image_buffer.cpp
+++ b/intern/cycles/cluster_rendering/libcluster/server_image_buffer.cpp
@@ -1,22 +1,56 @@
+
+#include "net_base.h" // for NET_IMAGE_TIMEOUT
+
+#include <fstream> // to save video stream
+
+#include 
+
+#ifdef WITH_CUDA
+#ifdef WITH_CUDA_DYNLOAD
+  #include <cuew.h>
+  // Do not use CUDA SDK headers when using CUEW
+  // The macro below is used by Optix SDK and is necessary to avoid DSO loading collision
+  // See device_optix.cpp for example.
+  #define OPTIX_DONT_INCLUDE_CUDA
+#else
+  #include <cuda.h>
+#endif
+#include "compression/nv_decoder.h"
+#include "compression/nv_encoder.h"
+#endif
+#include "compression/turbojpeg_compressor.h"
+#ifdef WITH_CUDA
+  #include "cuda_context_provider.h"
+#endif
 #include "./utils/timer.h" // for time_dt
 #include "denoising/denoising_context.h"
-#include "net_base.h" // for NET_IMAGE_TIMEOUT
+#ifdef WITH_OPTIX
+#include "denoising/master_optix_denoiser.h"
+#endif
 #include "server_image_buffer.h"
 #include "server_image.h"
+#ifdef WITH_CUDA
+  #include "../libcluster_cuda_kernels/gpu_image_utils.h"
+#endif
 
 namespace cgr_libcluster {
 
+const string ServerImageBuffer::VIDEO_FULL_FILE_NAME = "master_nvencoded_video_stream.h264";
+
 ServerImageBuffer::ServerImageBuffer(int s, bool hasServerImage, bool hasStreamImage): imageBuffer(NULL), buffer_size(s) {
   if (hasServerImage)
     imageBuffer = new ServerImage[buffer_size];
   if (hasStreamImage)
     streamedImageBuffer.resize(buffer_size);
+  turbojpeg_compressor_uptr.reset(new TurbojpegCompressor());
   reset();
 }
 
 ServerImageBuffer::~ServerImageBuffer() {
   if (imageBuffer)
     delete []imageBuffer;
+#if defined(OS_LINUX) && defined(WITH_CUDA)
+  releaseImageCompressionCudaBuffers();
+#endif
 }
 
 ServerImage* ServerImageBuffer::getActiveImage() {
@@ -29,6 +63,12 @@ ServerImage& ServerImageBuffer::getReadImage() {
   return imageBuffer[readIndex];
 }
 
+void ServerImageBuffer::set_output_folder_path(const std::string & output_folder_path) {
+  this->output_folder_path = output_folder_path;
+  encoded_videostream_filename = output_folder_path + "/" + VIDEO_FULL_FILE_NAME;
+  VLOG(3) << "Path to the nvencoded video stream file: " << encoded_videostream_filename;
+}
+
 void ServerImageBuffer::reset()
 {
   readIndex = 0;
@@ -95,6 +135,7 @@ void ServerImageBuffer::endInsertImage()
 //but I am leaving it fixed for now
 void ServerImageBuffer::parallel_convert(const ServerImage& src, std::vector<uchar3>& dst)
 {
+  scoped_timer pixel_convert_timer;
   int ts = src.getWidth() * src.getHeight();
   int thread_count = PIXEL_THREAD_COUNT;
   int interval = ts/thread_count;
@@ -118,6 +159,7 @@ void ServerImageBuffer::parallel_convert(const ServerImage& src, std::vector<uchar3>& dst)
 
+void ServerImageBuffer::writeImageToVideoStreamFile(const std::vector<uint8_t> &encoded_image) const {
+  if(encoded_videostream_filename.length() < 1) {
+    return;
+  }
+  std::ofstream video_stream_file;
+  video_stream_file.open(encoded_videostream_filename, std::ios::app | std::ios::binary);
+  if(video_stream_file.is_open()) {
+    video_stream_file.write(reinterpret_cast<const char*>(encoded_image.data()), encoded_image.size());
+    VLOG(3) << "Wrote encoded image of size: " << encoded_image.size();
+    video_stream_file.close();
+  } else {
+    std::string message = "FATAL. Unable to open video stream output file: " + encoded_videostream_filename;
+    throw std::invalid_argument(message);
+  }
+}
+
+#if defined(OS_LINUX) && defined(WITH_CUDA)
+
+void ServerImageBuffer::allocateCudaBuffer(CUdeviceptr * buffer_ptr, size_t buffer_size, const std::string & buffer_name) {
+  LOG(INFO) << "Allocating cuda memory for: " << buffer_name;
+  CUresult cu_result = cuMemAlloc(buffer_ptr, buffer_size);
+  if(cu_result != CUDA_SUCCESS) {
+    std::string message = "Error. Could not allocate memory for " + buffer_name + " on device. Cuda error code: " +
+      std::to_string(cu_result);
+    throw std::runtime_error(message);
+  }
+}
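The cuMemAlloc/cuMemFree pairs above and below are managed by hand, so the release path has to be remembered on every exit. For comparison, a small RAII holder would make the buffers impossible to leak on the exception paths; this is an illustrative sketch only (ScopedCudaBuffer is a hypothetical name, not part of libcluster), assuming the same CUDA driver API and error style used in this file:

class ScopedCudaBuffer {
public:
  ScopedCudaBuffer(size_t size_in_bytes, const std::string &name) : name_(name) {
    CUresult cu_result = cuMemAlloc(&ptr_, size_in_bytes);
    if (cu_result != CUDA_SUCCESS) {
      throw std::runtime_error("Error. Could not allocate memory for " + name_ +
                               " on device. Cuda error code: " + std::to_string(cu_result));
    }
  }
  ~ScopedCudaBuffer() {
    // Never throw from a destructor; a failed free is only logged.
    if (ptr_ != 0 && cuMemFree(ptr_) != CUDA_SUCCESS) {
      LOG(ERROR) << "cuMemFree failed for " << name_;
    }
  }
  // Owning type: copying would double-free.
  ScopedCudaBuffer(const ScopedCudaBuffer &) = delete;
  ScopedCudaBuffer &operator=(const ScopedCudaBuffer &) = delete;
  CUdeviceptr get() const { return ptr_; }
private:
  CUdeviceptr ptr_ = 0;
  std::string name_;
};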
+
+void ServerImageBuffer::resetNvEncoder(const ServerImage &server_image) {
+  VLOG(3) << "Resetting NvEncoder";
+  CUcontext cuda_context = CudaContextProvider::getPrimaryContext(CUDA_DEVICE_NUM);
+  const CUDAContextScope scope(cuda_context);
+  // The renderer, the optix denoiser and the nvencoder share the same cuda context; call cuCtxSynchronize
+  // to make sure all operations on the cuda context are completed before we start creating the NvEncoder.
+  // Not doing this may lead to sporadic problems like the nvEncDestroyEncoder call hanging.
+  cuCtxSynchronize();
+  nv_encoder_uptr.reset(new NvEncoder(
+    NV_ENC_BUFFER_FORMAT_NV12,
+    cuda_context,
+    server_image.getWidth(),
+    server_image.getHeight()));
+  VLOG(3) << "Created NvEncoder successfully";
+  releaseImageCompressionCudaBuffers();
+  allocateImageCompressionCudaBuffers(server_image);
+  VLOG(3) << "Resetting NvEncoder done";
+}
+
+void ServerImageBuffer::copyServerImageToGpuMemory(const ServerImage & server_image, CUdeviceptr & linear_image_buffer_gpu) {
+  CUcontext cuda_context = CudaContextProvider::getPrimaryContext(CUDA_DEVICE_NUM);
+  const CUDAContextScope scope(cuda_context);
+  uint8_t* linear_image_buffer_cpu = server_image.denoised ? (uint8_t*)server_image.denoised_pixels :
+    (uint8_t*)server_image.pixels;
+  CUresult cu_result = cuMemcpyHtoD(linear_image_buffer_gpu, linear_image_buffer_cpu, server_image.getBufferSize());
+  if(cu_result != CUDA_SUCCESS) {
+    std::string message = "Error. Could not copy input buffer from host to device memory. CUDA error code: " +
+      std::to_string(cu_result);
+    throw std::runtime_error(message);
+  }
+}
+
+void ServerImageBuffer::allocateImageCompressionCudaBuffers(const ServerImage &server_image) {
+  const size_t num_pixels = server_image.getImageSize();
+  // 1 byte per color channel
+  const int nvencoder_yuv_nv12_buffer_size = num_pixels + (num_pixels)/2;
+  allocateCudaBuffer(&nvencoder_input_buffer_gpu, nvencoder_yuv_nv12_buffer_size,
+    "input cuda buffer for nvencoder");
+  allocateCudaBuffer(&linear_image_buffer_gpu, server_image.getBufferSize(),
+    "cuda buffer to accept linear images from CPU memory");
+}
+
+void ServerImageBuffer::releaseImageCompressionCudaBuffers() {
+  CUresult cu_result = cuMemFree(nvencoder_input_buffer_gpu);
+  if(cu_result != CUDA_SUCCESS) {
+    LOG(ERROR) << "Error. cuMemFree failed for nvencoder_input_buffer_gpu with error code: " + std::to_string(cu_result);
+  }
+  nvencoder_input_buffer_gpu = 0;
+
+  cu_result = cuMemFree(linear_image_buffer_gpu);
+  if(cu_result != CUDA_SUCCESS) {
+    LOG(ERROR) << "Error. cuMemFree failed for linear_image_buffer_gpu with error code: " + std::to_string(cu_result);
+  }
+  linear_image_buffer_gpu = 0;
+}
+
+void ServerImageBuffer::deleteNvEncoder() {
+  nv_encoder_uptr.reset(nullptr);
+}
+
+#endif // OS_LINUX && WITH_CUDA
+
 void ServerImageBuffer::createStreamedImage(int index, bool scaled, DenoisingContext & denoising_context)
 {
-  ServerImage &bufferImage = imageBuffer[index];
-  StreamedImage &streamedImage = streamedImageBuffer[index];
-  streamedImage.initImage(bufferImage.camera);
-  bufferImage.denoised = false;
+  ServerImage &server_image = imageBuffer[index];
+  StreamedImage &streamed_image = streamedImageBuffer[index];
+  streamed_image.initImage(server_image.camera);
+  server_image.denoised = false;
   if(!scaled) {
-    normalizeImage(bufferImage);
+    normalizeImage(server_image);
+  }
+  float sampleCount = server_image.sampleCount;
+
+#if defined(OS_LINUX) && defined(WITH_CUDA)
+  const bool use_nvencoder_for_compression = server_image.camera.master_image_compressor ==
+    ClusterSessionParams::MASTER_IMAGE_COMPRESSOR_NVENCODER;
+  if(use_nvencoder_for_compression) {
+    if(!nv_encoder_uptr || server_image.camera.frame == 0) {
+      resetNvEncoder(server_image);
+    }
+  }
+#else
+  if(server_image.camera.master_image_compressor == ClusterSessionParams::MASTER_IMAGE_COMPRESSOR_NVENCODER) {
+    throw std::runtime_error("ERROR. The NVENCODER compressor is requested but the server is compiled without CUDA "
+      "support, so it has no nvencoder and cannot encode images with it. "
+      "Recompile with CUDA or use the JPEG compressor instead. Terminating.");
+  }
+#endif
+
+  if(server_image.camera.master_denoiser) {
+    bool denoising_ok = denoiseImage(server_image, denoising_context);
+    if(!denoising_ok) {
+      throw std::runtime_error("Image denoising failed.");
+    }
   }
-  if(bufferImage.camera.master_denoiser) {
-    denoiseImage(bufferImage, denoising_context);
+  MasterDenoiser * master_denoiser = denoising_context.getDenoiser(server_image.camera.master_denoiser);
+  const bool image_is_denoised_by_optix_denoiser = master_denoiser &&
+    master_denoiser->getType() == ClusterSessionParams::MasterDenoiser::MASTER_DENOISER_OPTIX;
+
+#ifdef WITH_OPTIX
+  MasterOptixDenoiser* optix_denoiser = nullptr;
+  if(image_is_denoised_by_optix_denoiser) {
+    optix_denoiser = dynamic_cast<MasterOptixDenoiser*>(master_denoiser);
+  }
+#endif
+
+  scoped_timer compression_timer;
+#if defined(OS_LINUX) && defined(WITH_CUDA)
+  if(use_nvencoder_for_compression) {
+  #ifdef WITH_OPTIX
+    compressWithNvencoder(server_image, streamed_image, optix_denoiser);
+  #else
+    compressWithNvencoder(server_image, streamed_image);
+  #endif
+  } else {
+  #ifdef WITH_OPTIX
+    compressWithTurbojpeg(server_image, streamed_image, optix_denoiser);
+  #else
+    compressWithTurbojpeg(server_image, streamed_image);
+  #endif
+  }
+#else
+  #ifdef WITH_OPTIX
+  compressWithTurbojpeg(server_image, streamed_image, optix_denoiser);
+  #else
+  compressWithTurbojpeg(server_image, streamed_image);
+  #endif
+#endif
+
+  double compression_time = compression_timer.elapsed();
+  LOG(INFO) << "compression time for frame: " << server_image.camera.frame << " " << compression_time <<
+    " (includes pixel conversion time)";
+}
+
+#if defined(OS_LINUX) && defined(WITH_CUDA)
+#ifdef WITH_OPTIX
+void ServerImageBuffer::compressWithNvencoder(ServerImage &server_image, StreamedImage &streamed_image,
+    MasterOptixDenoiser* optix_denoiser) {
+#else
+void ServerImageBuffer::compressWithNvencoder(ServerImage &server_image, StreamedImage &streamed_image) {
+#endif
+  CUstream cuda_stream = nullptr;
+  CUdeviceptr
image_to_compress_gpu_ptr = 0; +#ifdef WITH_OPTIX + if(optix_denoiser) { + cuda_stream = optix_denoiser->getCudaStream(); + image_to_compress_gpu_ptr = optix_denoiser->getCudaMemPointerToDenoisedImage(); + } else +#endif + { + copyServerImageToGpuMemory(server_image, linear_image_buffer_gpu); + image_to_compress_gpu_ptr = linear_image_buffer_gpu; + } + const bool useSrgbColorSpace = + master_image_color_format == ClusterSessionParams::MasterImageColorFormat::MASTER_IMAGE_COLOR_FORMAT_SRGB; + gpuRawRgbToGammaCorrectedYuvNv12(image_to_compress_gpu_ptr, server_image.getWidth(), + server_image.getHeight(), useSrgbColorSpace, cuda_stream, nvencoder_input_buffer_gpu); + nv_encoder_uptr->encode(nvencoder_input_buffer_gpu, streamed_image.getCompressedImage()); +} +#endif // WITH_CUDA + +#ifdef WITH_OPTIX +void ServerImageBuffer::compressWithTurbojpeg(ServerImage &server_image, StreamedImage &streamed_image, + MasterOptixDenoiser* optix_denoiser) { +#else +void ServerImageBuffer::compressWithTurbojpeg(ServerImage &server_image, StreamedImage &streamed_image) { +#endif +#ifdef WITH_OPTIX + if(optix_denoiser) { + optix_denoiser->copyDenoisedImageFromCudaToHostMemory( + (uint8_t*)server_image.denoised_pixels); + server_image.denoised = true; + } +#endif + parallel_convert(server_image, streamed_image.getByteBuffer()); + uint8_t *jpeg_data = NULL; + size_t buf_size = turbojpeg_compressor_uptr->compress(streamed_image.getByteBuffer().data(), + server_image.getWidth(), server_image.getHeight(), + server_image.camera.compression_quality, + jpeg_data); + if (jpeg_data) { + streamed_image.copyInCompressedImage(jpeg_data, buf_size); + turbojpeg_compressor_uptr->free(jpeg_data); } - scoped_timer pixel_convert_timer; - parallel_convert(bufferImage, streamedImage.getByteBuffer()); - LOG(INFO) << "pixel conversion time for frame: " << bufferImage.camera.frame << " " << pixel_convert_timer.elapsed(); } bool ServerImageBuffer::denoiseImage(ServerImage &bufferImage, DenoisingContext & denoising_context) { @@ -162,7 +402,22 @@ bool ServerImageBuffer::denoiseImage(ServerImage &bufferImage, DenoisingContext if(result == DenoisingResult::OK) { return true; } - +#ifdef WITH_OPTIX + else if (result == DenoisingResult::IMAGE_TOO_BIG && + master_denoiser->getType() == ClusterSessionParams::MasterDenoiser::MASTER_DENOISER_OPTIX) { + bool replacement_ok = denoising_context.replaceOptixDenoiser(bufferImage.getWidth(), bufferImage.getHeight()); + if(replacement_ok) { + master_denoiser = denoising_context.getDenoiser(bufferImage.camera.master_denoiser); + if(master_denoiser) { + return master_denoiser->denoise(bufferImage) == DenoisingResult::OK; + } else { + LOG(WARNING) << "WARNING. No denoiser instance of requested type: " << bufferImage.camera.master_denoiser; + } + } else { + LOG(ERROR) << "ERROR. 
replaceOptixDenoiser failed, image will not be denoised"; + } + } +#endif return false; } @@ -190,18 +445,18 @@ int ServerImageBuffer::add_frame(RPCReceive &rcv, std::atomic &stopped) { //Feng TODO: Add TIMEOUT if thin client gets stuck with not able //to process the streamed images -//not having this TIMEOUT requires the main SessionThread +//not having this TIMEOUT requires the main SessionThread //to stay alive until we have killed theClient //the same applies to add_frame(RPCReceive &) //where the network thread of the server for master -//need to keep pulling from the queue for there to +//need to keep pulling from the queue for there to //be space in the queue to add new worker frames int ServerImageBuffer::add_streamed_image(RPCReceive &rcv, std::atomic &stopped, bool save_streamed_image, const std::string &output_folder_path, int frame_id) { LOG(INFO) << "thin client tries to find space in the display image buffer"; - + // while (isFull() && numIterations++ < maxIterations) { while (isFull()) { if (stopped) return -1; @@ -209,19 +464,43 @@ int ServerImageBuffer::add_streamed_image(RPCReceive &rcv, } if (!isFull() ) { int frameIndex = writeIndex; - StreamedImage &writeImage = streamedImageBuffer[frameIndex]; - writeImage.read(rcv); + StreamedImage &streamed_image = streamedImageBuffer[frameIndex]; + streamed_image.read(rcv); + scoped_timer decompress_timer; +#if defined(OS_LINUX) && defined(WITH_CUDA) + const bool use_nvdecoder_for_decompression = streamed_image.getNetCamera().master_image_compressor == + ClusterSessionParams::MASTER_IMAGE_COMPRESSOR_NVENCODER; + if(use_nvdecoder_for_decompression) { + if(!nv_decoder_uptr || streamed_image.getFrame() == 0) { + VLOG(3) << "Creating nvdecoder"; + CUcontext cuda_context = CudaContextProvider::getPrimaryContext(CUDA_DEVICE_NUM); + const CUDAContextScope scope(cuda_context); + nv_decoder_uptr.reset(new NvDecoder(cuda_context)); + } + nv_decoder_uptr->decode(streamed_image.getCompressedImage(), + streamed_image.getFrame(), &streamed_image.getRgbImageBuffer()); + } else +#endif + { + int width = 0, height = 0; + streamed_image.getImage(width, height); + if (!turbojpeg_compressor_uptr->decompress(streamed_image.getCompressedImage(), width, height, + streamed_image.getRgbImageBuffer())) { + LOG(ERROR) << "jpeg decompression failed"; + } + } + LOG(INFO) << "decompress image for frame: " << streamed_image.getFrame() << " " << decompress_timer.elapsed(); if (save_streamed_image) { - if (!writeImage.saveImage(output_folder_path, "client_streamed_image_rgb_", frame_id)) { + if (!streamed_image.saveImage(output_folder_path, "client_streamed_image_rgb_", frame_id)) { LOG(ERROR) << "failed to save streamed image"; } } incWriteIndex(); - LOG(INFO) << "thin client insert master image frame: "<< writeImage.getFrame(); + LOG(INFO) << "thin client insert master image frame: "<< streamed_image.getFrame(); return frameIndex; } else { - StreamedImage writeImage; - writeImage.read(rcv); + StreamedImage streamed_image; + streamed_image.read(rcv); return -1; } } diff --git a/intern/cycles/cluster_rendering/libcluster/server_image_buffer.h b/intern/cycles/cluster_rendering/libcluster/server_image_buffer.h index 4b4da228c96..37b82a7e532 100644 --- a/intern/cycles/cluster_rendering/libcluster/server_image_buffer.h +++ b/intern/cycles/cluster_rendering/libcluster/server_image_buffer.h @@ -11,41 +11,54 @@ #define IMAGE_FRAME_COUNT 10 #define PIXEL_THREAD_COUNT 60 +// Not sure that this is the best place to set OS_LINUX +// IMO it should be set somewhere on higher 
level +// setting it here for now to get system built. +// TODO: revisit this flag later +#if defined(linux) || defined(__linux) || defined(__linux__) +# ifndef OS_LINUX +# define OS_LINUX +# endif +#endif + namespace cgr_libcluster { +using std::string; + class Camera; +class MasterOptixDenoiser; class NetCamera; +#if defined(OS_LINUX) && defined(WITH_CUDA) +class NvEncoder; +class NvDecoder; +#endif class RPCSend; class RPCReceive; class PathTraceDisplay; class ServerImage; class DenoisingContext; - +class TurbojpegCompressor; class ServerImageBuffer { public: ServerImageBuffer(int s=IMAGE_FRAME_COUNT, bool hasServerImage = true, bool hasStreamImage = false); ~ServerImageBuffer(); - + void init(bool hasServerImage = true, bool hasStreamImage = true); bool isEmpty(); bool isFull(); ServerImage& getReadImage(); - - ServerImage* getActiveImage(); - ServerImage* beginInsertImage(NetCamera&); void endInsertImage(); bool begin_frame(NetCamera&); - int add_frame(RPCReceive& rcv, std::atomic& stopped); int add_streamed_image(RPCReceive& rcv, std::atomic& stopped, bool save_streamed_image, const std::string &output_folder_path, int frame_id); bool wait_for_streamed_image(std::atomic& stopped ); const StreamedImage* get_streamed_image(); - + void set_output_folder_path(const std::string & output_folder_path); void reset(); @@ -57,7 +70,10 @@ public: void set_master_image_color_format(ClusterSessionParams::MasterImageColorFormat master_image_color_format_in) { master_image_color_format = master_image_color_format_in; } - +#if defined(OS_LINUX) && defined(WITH_CUDA) + void deleteNvEncoder(); +#endif + //only accessed by insertion thread std::vector streamedImageBuffer; @@ -72,6 +88,9 @@ public: tbb::mutex serverImageStopMutex; private: + static const int CUDA_DEVICE_NUM = 0; + static const string VIDEO_FULL_FILE_NAME; + bool denoiseImage(ServerImage &bufferImage, DenoisingContext & denoising_context); // In the master-workers configuration (which is target production setup) images are normalised // upon arriving at the master. 
It's done in different threads what benefits from the
@@ -82,6 +101,55 @@ private:
   void normalizeImage(ServerImage &server_image);
 
   ClusterSessionParams::MasterImageColorFormat master_image_color_format = ClusterSessionParams::DEFAULT_MASTER_IMAGE_COLOR_FORMAT;
+
+#ifdef WITH_CUDA
+  void allocateCudaBuffer(long long unsigned int * buffer_ptr, size_t buffer_size,
+    const std::string & buffer_name);
+  void allocateImageCompressionCudaBuffers(const ServerImage &server_image);
+  void releaseImageCompressionCudaBuffers();
+  void resetNvEncoder(const ServerImage &server_image);
+
+  void copyServerImageToGpuMemory(const ServerImage &server_image, long long unsigned int &linear_image_buffer_gpu);
+#endif // WITH_CUDA
+  void writeImageToVideoStreamFile(const std::vector<uint8_t> &encoded_image) const;
+
+#ifdef WITH_CUDA
+  #ifdef WITH_OPTIX
+  void compressWithNvencoder(ServerImage &server_image, StreamedImage &streamed_image, MasterOptixDenoiser* optix_denoiser);
+  void compressWithTurbojpeg(ServerImage &server_image, StreamedImage &streamed_image, MasterOptixDenoiser* optix_denoiser);
+  #else
+  void compressWithNvencoder(ServerImage &server_image, StreamedImage &streamed_image);
+  void compressWithTurbojpeg(ServerImage &server_image, StreamedImage &streamed_image);
+  #endif // WITH_OPTIX
+#else
+  void compressWithTurbojpeg(ServerImage &server_image, StreamedImage &streamed_image);
+#endif // WITH_CUDA
+
+  std::string output_folder_path;
+  std::string encoded_videostream_filename;
+
+  bool image_buffers_allocated = false;
+
+  // Use long long unsigned int instead of CUdeviceptr because cuda headers are not included
+  // in this header file: server_image_buffer.h is included from cycles/session/buffers.cpp
+  // via libcluster/net_server.h, but cycles/session is unaware of cuda, so compilation would fail.
+  // To minimize changes to cycles/session and keep it cuda independent, cuda headers are only
+  // included in server_image_buffer.cpp.
+  // We may come up with a better encapsulation and libcluster interface towards the Blender code.
+  long long unsigned int nvencoder_input_buffer_gpu = 0;
+
+  // When the rendered image is denoised by the OIDN denoiser, or is not denoised at all,
+  // it is hosted in CPU memory.
+  // In these cases we copy such images into this CUDA memory when nvencoder is used for the image compression,
+  // so we can do gamma correction and rgb to yuv conversion before passing the image to the nvencoder.
+ long long unsigned int linear_image_buffer_gpu = 0; + + // image compressors +#if defined(OS_LINUX) && defined(WITH_CUDA) + std::unique_ptr nv_encoder_uptr; + std::unique_ptr nv_decoder_uptr; +#endif + std::unique_ptr turbojpeg_compressor_uptr; }; } // cgr_libcluster diff --git a/intern/cycles/cluster_rendering/libcluster/streamed_image.cpp b/intern/cycles/cluster_rendering/libcluster/streamed_image.cpp index 5e38521f6d3..e03696c8b51 100644 --- a/intern/cycles/cluster_rendering/libcluster/streamed_image.cpp +++ b/intern/cycles/cluster_rendering/libcluster/streamed_image.cpp @@ -1,15 +1,41 @@ #include "image_io_util.h" -#include // for tjFree #include "./utils/timer.h" // for scoped_timer #include "./utils/logging.h" #include "streamed_image.h" #include "net_simple.h" +#include "compression/turbojpeg_compressor.h" + namespace cgr_libcluster { using OpenImageIO_v2_4::ImageOutput; using OpenImageIO_v2_4::TypeDesc; +StreamedImage::StreamedImage() : allocated(false), w(0), h(0), + jpeg_compressor_uptr(new TurbojpegCompressor()) { +} + +StreamedImage::~StreamedImage(){ +} + +StreamedImage::StreamedImage(const StreamedImage& ) : + jpeg_compressor_uptr(new TurbojpegCompressor()) { +} + +StreamedImage::StreamedImage( StreamedImage&& ) : + jpeg_compressor_uptr(new TurbojpegCompressor()) { +} + +StreamedImage& StreamedImage::operator=(const StreamedImage& ) { + jpeg_compressor_uptr.reset(new TurbojpegCompressor()); + return *this; +} + +StreamedImage& StreamedImage::operator=(StreamedImage&& ) { + jpeg_compressor_uptr.reset(new TurbojpegCompressor()); + return *this; +} + size_t StreamedImage::read(RPCReceive &rcv) { size_t buf_size = 0; @@ -18,163 +44,47 @@ size_t StreamedImage::read(RPCReceive &rcv) NetCamera net_camera; rcv.read_buffer(&net_camera, sizeof(NetCamera)); + // log line below is used by get_metrics.py to calculate stats. If you change it + // please make sure get_metrics.py still works correctly. Update if needed. 
LOG(INFO) << "net camera received for frame: " << net_camera.frame << " " << net_camera.cam_width << " " << net_camera.cam_height; initImage(net_camera); - std::vector cbuffer(buf_size); - rcv.read_buffer(reinterpret_cast(cbuffer.data()), buf_size); - LOG(INFO) << "read image for frame: " << camera.frame << " with sample: " << camera.sampleCount << " size: " << buf_size; - scoped_timer dec_timer; - if (!decompress(cbuffer)) { - buf_size = 0; + if(compressed_image.size() != buf_size) { + compressed_image.resize(buf_size); } - LOG(INFO) << "decompress image for frame: " << camera.frame << " " << dec_timer.elapsed(); + rcv.read_buffer(reinterpret_cast(compressed_image.data()), buf_size); + LOG(INFO) << "read image for frame: " << camera.frame << " with sample: " << camera.sampleCount << " size: " << buf_size; return buf_size; } size_t StreamedImage::write(RPCSend &snd) { - scoped_timer compression_timer; - uint8_t *jpeg_data = NULL; - size_t buf_size = compress(jpeg_data); - LOG(INFO) << "compression time for frame: " << camera.frame << " " << compression_timer.elapsed(); - if (buf_size > 0) { - // get_metrics.py script depends on formatting of this log line, it you change it, + const size_t compressed_image_size = compressed_image.size(); + if (compressed_image_size > 0) { + // get_metrics.py script depends on formatting of this log line, if you change it, // please make sure that script still works or update it accordingly - LOG(INFO) << "stream image for frame: " << camera.frame << " with sample " << camera.sampleCount << " quality: " << camera.compression_quality << " buffer size " << buf_size + sizeof(buf_size) + sizeof(NetCamera); + LOG(INFO) << "stream image for frame: " << camera.frame << " with sample " << camera.sampleCount << " quality: " << + camera.compression_quality << " buffer size " << compressed_image_size + sizeof(compressed_image_size) + sizeof(NetCamera); snd.write(); - snd.write_buffer(&buf_size, sizeof(buf_size)); + snd.write_buffer(&compressed_image_size, sizeof(compressed_image_size)); snd.write_buffer(&camera, sizeof (NetCamera) ); - snd.write_buffer(jpeg_data, buf_size); + snd.write_buffer(compressed_image.data(), compressed_image_size); } - if (jpeg_data) tjFree(jpeg_data); - return buf_size; + return compressed_image_size; } -#ifdef WITH_WEBRTC -size_t StreamedImage::write(cgr_streaming::WebrtcPeer *webrtcPeer) -{ - if (webrtcPeer == nullptr) { - LOG(ERROR) << "webrtcPeer is nullptr"; - return 0; - } - size_t buf_size = getBufferSize(); - if (buf_size > 0) { - // TODO(fangy): Might need to work with pmishchuk@ to update the LOG line below so get_metrics.py works when WebRTC is used. 
- // get_metrics.py script depends on formatting of this log line, it you change it, - // please make sure that script still works or update it accordingly - LOG(INFO) << "stream image for frame: " << camera.frame << " with sample " << camera.sampleCount << " quality: " << camera.compression_quality << " buffer size " << buf_size + sizeof(buf_size) + sizeof(NetCamera) << " using WebRTC"; - int width = 0, height = 0; - void *image_buffer = getImage(width, height); - LOG(INFO) << "WebRTC sendFrame id " << getFrame() << " width " << width << " height " << height; - scoped_timer send_frame_timer; - cgr_streaming::WebrtcFrame frame(getFrame(), static_cast(image_buffer), width, height); - if (!webrtcPeer->sendFrame(frame)) { - LOG(ERROR) << "failed to send frame via WebRTC"; - } - LOG(INFO) << "send frame time for frame number: " << camera.frame << " " << send_frame_timer.elapsed(); - } - return buf_size; -} -#endif - - -//#define TIME_JPEG -size_t StreamedImage::compress(unsigned char* &jpeg_image) -{ - // Convert buffer to unsigned char * 3 channels - const int subsampling = TJSAMP_444; - size_t jpeg_length = 0; // tjCompress2 will allocate the jpeg_image buffer - jpeg_image = nullptr; - -#ifdef TIME_JPEG - struct timespec start_time, end_time; - clock_gettime(CLOCK_MONOTONIC, &start_time); -#endif - tjhandle jpeg_compressor = tjInitCompress(); - if (jpeg_compressor == nullptr) { - LOG(ERROR) << "Cannot initialize JPEG compressor"; - return 0; - } - void* src_buffer = byte_buffer.data(); - int jpeg_error = tjCompress2(jpeg_compressor, - (unsigned char*) src_buffer, - w, - 0, - h, - TJPF_RGB, - &jpeg_image, - (unsigned long *)&jpeg_length, - subsampling, - camera.compression_quality, - TJFLAG_FASTDCT); - tjDestroy(jpeg_compressor); - if (jpeg_error < 0) { - const char *jpeg_error_str = tjGetErrorStr(); - LOG(ERROR) << "JPEG compression error: " << jpeg_error_str; - return 0; - } - -#ifdef TIME_JPEG - clock_gettime(CLOCK_MONOTONIC, &end_time); - // ms time - double elapsed_time = (end_time.tv_nsec - start_time.tv_nsec) / 1e6; - LOG(INFO) << "TIMING: JPEG compression: " << elapsed_time << "ms" - << ", resolution " << w << "x" << h - << ", sizes " << src_buffer.size() << " (" << jpeg_length << ")"; -#endif - return jpeg_length; +void StreamedImage::copyInCompressedImage(const uint8_t * compressed_image_ptr, const size_t size_in_bytes) { + // for uint8_t the number_of_elements is the same as size_in_bytes + const size_t number_of_elements = size_in_bytes; + compressed_image.assign(compressed_image_ptr, compressed_image_ptr + number_of_elements); } -bool StreamedImage::decompress(std::vector &cbuffer) -{ -#ifdef TIME_JPEG - struct timespec start_time, end_time; - clock_gettime(CLOCK_MONOTONIC, &start_time); -#endif - /* Use TurboJPEG to decompress the buffer */ - int subsampling = 0; - tjhandle jpeg_decompressor = tjInitDecompress(); - if (jpeg_decompressor == nullptr) { - LOG(ERROR) << "Cannot initialize JPEG decompressor"; - return false; - } - int jpeg_error = tjDecompressHeader2(jpeg_decompressor, cbuffer.data(), - cbuffer.size(), &w, &h, &subsampling); - if (jpeg_error < 0) { - LOG(ERROR) << "Cannot decode JPEG header from StreamedImage"; - tjDestroy(jpeg_decompressor); - return false; - } +std::vector& StreamedImage::getCompressedImage() { + return compressed_image; +} - std::vector dst_buffer(w * h * 3); - //void *dst_buffer = byte_buffer.data(); - jpeg_error = tjDecompress2(jpeg_decompressor, - cbuffer.data(), - cbuffer.size(), - (unsigned char*) dst_buffer.data(), - w, - 0, - h, - TJPF_RGB, 
- TJFLAG_ACCURATEDCT); - tjDestroy(jpeg_decompressor); - if (jpeg_error < 0) { - const char *jpeg_error_str = tjGetErrorStr(); - LOG(ERROR) << "JPEG decompression error" << jpeg_error_str; - return false; - } - putImage(w, h, dst_buffer.data()); -#ifdef TIME_JPEG - clock_gettime(CLOCK_MONOTONIC, &end_time); - // ms time - double elapsed_time = (end_time.tv_nsec - start_time.tv_nsec) / 1e6; - LOG(INFO) << "TIMING: JPEG decompression: " << elapsed_time << "ms" - << ", resolution " << w << "x" << h - << ", sizes " << cbuffer.size() << " (" << dst_buffer.size() << ")"; -#endif - - return true; +std::vector& StreamedImage::getByteBuffer() { + return byte_buffer; } bool StreamedImage::saveImage(const std::string &output_folder_path, const std::string &file_name_prefix, int frame_id) { @@ -183,4 +93,12 @@ bool StreamedImage::saveImage(const std::string &output_folder_path, const std:: return ImageIOUtil::saveFrame(file_path, TypeDesc::UCHAR, image_output.get(),getByteBuffer().data(), w, h); } +std::vector& StreamedImage::getRgbImageBuffer() { + return byte_buffer; +} + +NetCamera& StreamedImage::getNetCamera() { + return camera; +} + } // cgr_libcluster diff --git a/intern/cycles/cluster_rendering/libcluster/streamed_image.h b/intern/cycles/cluster_rendering/libcluster/streamed_image.h index 04008e42398..4b744a2c823 100644 --- a/intern/cycles/cluster_rendering/libcluster/streamed_image.h +++ b/intern/cycles/cluster_rendering/libcluster/streamed_image.h @@ -10,29 +10,26 @@ namespace cgr_libcluster { class PathTraceDisplay; class RPCSend; class RPCReceive; +class TurbojpegCompressor; class StreamedImage { public: - StreamedImage() : allocated(false), w(0), h(0) {} - ~StreamedImage() {} - void putImage(int width, int height, const void *image) - { - if (w < width || h < height) { - w = width; - h = height; - byte_buffer.resize(w*h); - } - w = width; - h = height; - memcpy(byte_buffer.data(), image, sizeof(cgr_libcluster::uchar3) * width * height); - } + StreamedImage(); + ~StreamedImage(); + StreamedImage(const StreamedImage& ); + StreamedImage(StreamedImage&& ); + StreamedImage& operator=(const StreamedImage& ); + StreamedImage& operator=(StreamedImage&& ); + +public: const void* getImage(int &width, int &height) const { width = w; height = h; return byte_buffer.data(); } + void initImage(NetCamera &cam) { camera = cam; if (allocated) { @@ -48,26 +45,28 @@ class StreamedImage { size_t getBufferSize() const { return w * h * sizeof(cgr_libcluster::uchar3); } size_t read(RPCReceive &rcv); size_t write(RPCSend &snd); -#ifdef WITH_WEBRTC - size_t write(cgr_streaming::WebrtcPeer *webrtcPeer); -#endif int getFrame() const { return camera.frame; } - // Compress the StreamedImage into a jpeg stream stored in cbuffer - size_t compress(unsigned char*&) ; - // Decompress a jpeg stream cbuffer into the StreamedImage - bool decompress(std::vector &cbuffer); - bool saveImage(const std::string &output_folder_path, const std::string &file_name_prefix, int frame_id); - std::vector& getByteBuffer() {return byte_buffer; } + void copyInCompressedImage(const uint8_t * compressed_image_ptr, const size_t size_in_bytes); + std::vector& getByteBuffer(); + std::vector& getCompressedImage(); + std::vector& getRgbImageBuffer(); + NetCamera& getNetCamera(); private: bool allocated; int w; int h; - std::vector byte_buffer; + std::vector byte_buffer; // raw image as rgb + std::vector compressed_image; + NetCamera camera; + // Have TurbojpegCompressor as pointer here so we can use forward declaration + // and avoid including 
TurboJpeg header in this header and minimize changes in the Blender code + // which includes this header but is not aware about TurboJpeg + std::unique_ptr jpeg_compressor_uptr; }; } // cgr_libcluster diff --git a/intern/cycles/cluster_rendering/libcluster/test/CMakeLists.txt b/intern/cycles/cluster_rendering/libcluster/test/CMakeLists.txt index 1fd5466bb7a..1fd97674fe2 100644 --- a/intern/cycles/cluster_rendering/libcluster/test/CMakeLists.txt +++ b/intern/cycles/cluster_rendering/libcluster/test/CMakeLists.txt @@ -34,11 +34,13 @@ set(LIBRARIES add_definitions(-DWITH_CYCLES_LOGGING) -if(WITH_CUDA_DYNLOAD) - list(APPEND LIBRARIES extern_cuew) - add_definitions(-DWITH_CUDA_DYNLOAD) -else() - list(APPEND LIBRARIES ${CUDA_CUDA_LIBRARY}) +if(WITH_CYCLES_DEVICE_CUDA) + if(WITH_CUDA_DYNLOAD) + list(APPEND LIBRARIES extern_cuew) + add_definitions(-DWITH_CUDA_DYNLOAD) + else() + list(APPEND LIBRARIES ${CUDA_CUDA_LIBRARY}) + endif() endif() if(WITH_CYCLES_LOGGING) diff --git a/intern/cycles/cluster_rendering/libcluster/test/denoising_context_test.cpp b/intern/cycles/cluster_rendering/libcluster/test/denoising_context_test.cpp index 262327512b4..5c1e1b90491 100644 --- a/intern/cycles/cluster_rendering/libcluster/test/denoising_context_test.cpp +++ b/intern/cycles/cluster_rendering/libcluster/test/denoising_context_test.cpp @@ -1,7 +1,9 @@ #include "testing/testing.h" #include "gmock/gmock.h" +#ifdef WITH_OPTIX #include #include +#endif #include "mocks.h" #include "denoising/denoising_context.h" @@ -21,10 +23,10 @@ public: bool is_denoising_passes_on, const ImageOutputProvider & image_output_provider) : MasterDenoiser(ClusterSessionParams::MasterDenoiser::MASTER_DENOISER_OIDN, save_denoise_io, save_every_n_images, output_folder_path, is_denoising_passes_on, image_output_provider) {}; - + MOCK_METHOD0(destructor, void()); virtual ~MockOidnMasterDenoiser() override { destructor(); }; - + virtual DenoisingResult denoise(ServerImage &server_image) override { return DenoisingResult::OK; }; }; @@ -35,7 +37,7 @@ public: bool is_denoising_passes_on, const ImageOutputProvider & image_output_provider) : MasterDenoiser(ClusterSessionParams::MasterDenoiser::MASTER_DENOISER_OPTIX, save_denoise_io, save_every_n_images, output_folder_path, is_denoising_passes_on, image_output_provider) {}; - + MOCK_METHOD0(destructor, void()); virtual ~MockOptixMasterDenoiser() override { destructor(); }; @@ -53,7 +55,7 @@ public: // Tests -TEST(LibclusterDenoisingContextTest, uninitialized_context_getDenoiser_returns_null) { +TEST(LibclusterDenoisingContextTest, uninitialized_context_getDenoiser_returns_null) { const bool save_denoise_io = false; const int save_every_n_images = 1; const bool is_denoising_passes_on = false; @@ -109,7 +111,7 @@ TEST(LibclusterDenoisingContextTest, init_creates_all_denoisers_success) { ASSERT_EQ(oidn_master_denoiser->getType(), ClusterSessionParams::MasterDenoiser::MASTER_DENOISER_OIDN); ASSERT_EQ(oidn_master_denoiser, mock_oidn_master_denoiser_ptr); -#ifdef WITH_OPTIX +#ifdef WITH_OPTIX MasterDenoiser * optix_master_denoiser = denoising_context.getDenoiser( ClusterSessionParams::MasterDenoiser::MASTER_DENOISER_OPTIX); ASSERT_TRUE(optix_master_denoiser != nullptr); diff --git a/intern/cycles/cluster_rendering/libcluster/utils/cuda_utils.h b/intern/cycles/cluster_rendering/libcluster/utils/cuda_utils.h new file mode 100644 index 00000000000..5aa45e99354 --- /dev/null +++ b/intern/cycles/cluster_rendering/libcluster/utils/cuda_utils.h @@ -0,0 +1,46 @@ +#ifndef __CUDA_UTILS_H__ +#define __CUDA_UTILS_H__ 
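The two macros this new header defines just below are meant to wrap raw driver/OptiX calls so that every call site gets uniform logging plus optional exception behavior. A typical call site would look like this (hypothetical usage sketch, assuming <cuda.h> and the LOG macros used elsewhere in libcluster are in scope):

  CUdeviceptr buffer = 0;
  CUDA_API_CALL(cuMemAlloc(&buffer, 1024), THROW_IF_ERROR); // logs and throws on failure
  CUDA_API_CALL(cuMemFree(buffer), DO_NOT_THROW);           // logs on failure, never throws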
+ +namespace cgr_libcluster { + +#define THROW_IF_ERROR true +#define DO_NOT_THROW false + +#define CUDA_API_CALL(cuda_api, throw_if_error) \ + do { \ + CUresult cu_result = cuda_api; \ + if (cu_result != CUDA_SUCCESS) { \ + const char *error_name = NULL; \ + cuGetErrorName(cu_result, &error_name); \ + std::string message = std::string("ERROR. ") + #cuda_api + " failed with error: " + std::string(error_name); \ + LOG(ERROR) << message ; \ + if (throw_if_error) { \ + throw std::runtime_error(message); \ + } \ + } \ + } while (0) + +#define OPTIX_API_CALL(optix_api, throw_if_error) \ + do { \ + OptixResult optix_result = optix_api; \ + if(optix_result == OPTIX_ERROR_UNSUPPORTED_ABI_VERSION) { \ + std::string message = std::string("ERROR. ") + #optix_api + " failed due to installed driver " \ + "does not support ABI version: " + std::to_string(OPTIX_ABI_VERSION); \ + LOG(ERROR) << message ; \ + if(throw_if_error) { \ + throw std::runtime_error(message); \ + } \ + } else if(optix_result != OPTIX_SUCCESS) { \ + std::string message = std::string("ERROR. ") + #optix_api + " failed with error: " + \ + std::to_string(optix_result); \ + LOG(ERROR) << message ; \ + if(throw_if_error) { \ + throw std::runtime_error(message); \ + } \ + } \ + } while (0) + + +} // end of namespace cgr_libcluster + +#endif diff --git a/intern/cycles/cluster_rendering/libcluster/utils/image.cpp b/intern/cycles/cluster_rendering/libcluster/utils/image.cpp new file mode 100644 index 00000000000..3cbd81bea22 --- /dev/null +++ b/intern/cycles/cluster_rendering/libcluster/utils/image.cpp @@ -0,0 +1,41 @@ +#include + +#include "vector_types.h" // for uchar3 + +#include "image.h" + +namespace cgr_libcluster { + +#define PIXEL_THREAD_COUNT 60 + +int clip(int n, int lower, int upper) { + return std::max(lower, std::min(n, upper)); +} + +void yuv2Rgb(uint8_t *yuv_image, int width, int height, + std::vector * rgb_image) { + const int num_pixels = width * height; + for(int i = 0; i < num_pixels; ++i) { + const int pixel_row = i/width; + const int pixel_column = i - (width * pixel_row); + const int uv_row = pixel_row / 2; + const int uv_column = pixel_column / 2; + const int u_i = num_pixels + uv_row * width + uv_column*2; + const int v_i = u_i + 1; + const int y = yuv_image[i]; + const int u = yuv_image[u_i]; + const int v = yuv_image[v_i]; + const int c = y - 16; + const int d = u - 128; + const int e = v - 128; + const uint8_t r = clip((298*c + 409*e + 128) >> 8, 0, 255); + const uint8_t g = clip((298*c - 100*d - 208*e + 128) >> 8, 0, 255); + const uint8_t b = clip((298*c + 516*d + 128) >> 8, 0, 255); + uchar3 & rgb_pixel = (*rgb_image)[i]; + rgb_pixel.x = r; + rgb_pixel.y = g; + rgb_pixel.z = b; + } +} + +} diff --git a/intern/cycles/cluster_rendering/libcluster/utils/image.h b/intern/cycles/cluster_rendering/libcluster/utils/image.h new file mode 100644 index 00000000000..fda4c7e0ed3 --- /dev/null +++ b/intern/cycles/cluster_rendering/libcluster/utils/image.h @@ -0,0 +1,12 @@ +#ifndef __IMAGE_H__ +#define __IMAGE_H__ + +namespace cgr_libcluster { + +// This function expects that rgb_image is not null and is properly sized by caller to accomodate all pixels +void yuv2Rgb(uint8_t *yuv_image, int width, int height, + std::vector * rgb_image); + +} + +#endif diff --git a/intern/cycles/cluster_rendering/libcluster_cuda_kernels/CMakeLists.txt b/intern/cycles/cluster_rendering/libcluster_cuda_kernels/CMakeLists.txt new file mode 100644 index 00000000000..68aa411802d --- /dev/null +++ 
b/intern/cycles/cluster_rendering/libcluster_cuda_kernels/CMakeLists.txt
@@ -0,0 +1,8 @@
+
+file(GLOB SRC *.cpp *.cu)
+
+if(NOT WITH_CYCLES_CUDA_BINARIES)
+  find_package(CUDA)
+endif()
+
+cuda_add_library(cycles_libcluster_cuda_kernels "${LIB}" ${SRC} ${SRC_HEADERS})
diff --git a/intern/cycles/cluster_rendering/libcluster_cuda_kernels/gpu_image_utils.cu b/intern/cycles/cluster_rendering/libcluster_cuda_kernels/gpu_image_utils.cu
new file mode 100644
index 00000000000..77106c877ac
--- /dev/null
+++ b/intern/cycles/cluster_rendering/libcluster_cuda_kernels/gpu_image_utils.cu
@@ -0,0 +1,91 @@
+
+#include 
+#include 
+
+#include 
+
+#include "gpu_image_utils.h"
+
+constexpr int NUM_COLOR_CHANNELS = 3;
+constexpr int NUM_THREADS = 10240;
+constexpr int NUM_THREADS_PER_BLOCK = 512;
+
+template<typename T>
+struct PixelRgb {
+  T r;
+  T g;
+  T b;
+};
+
+__device__
+float color_linear_to_srgb(float c) {
+  if (c < 0.0031308f)
+    return (c < 0.0f) ? 0.0f : c * 12.92f;
+  else
+    return 1.055f * powf(c, 1.0f / 2.4f) - 0.055f;
+}
+
+// Simple RGB to YUV NV12 (4:2:0 12 bpp) conversion based on a description in this doc:
+// https://learn.microsoft.com/en-us/windows/win32/medfound/recommended-8-bit-yuv-formats-for-video-rendering
+// It only supports even resolutions to keep it simple. Supporting odd resolutions
+// adds extra complexity for edge cases, which is an unnecessary complication for
+// use cases we do not need: we only have to support standard resolutions like
+// 720p, 1080p, 1024x1024, 2048x2048 etc., whose dimensions are all even numbers.
+// It looks like RGB to YUV conversion is available in the NVIDIA Performance Primitives (npp)
+// https://developer.nvidia.com/npp
+// but we currently do not use npp, so we won't introduce this dependency just for a single method.
+// If a standard or better GPU implementation of RGB-to-YUV conversion becomes available we can switch to it.
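The NV12 index arithmetic in the kernel below is easier to follow written out for a single pixel on the CPU. This helper is an illustrative sketch (hypothetical name, assuming <stdint.h>; even width/height as required above), not code the patch adds:

static inline void writeNv12Pixel(uint8_t *yuv, int width, int height,
                                  int row, int col,
                                  uint8_t y, uint8_t u, uint8_t v) {
  const int num_pixels = width * height;
  // Luma plane: one byte per pixel, row-major.
  yuv[row * width + col] = y;
  // The interleaved chroma plane starts right after the luma plane. One (U,V)
  // pair is shared by each 2x2 pixel block, so a row of UV pairs covers two
  // pixel rows while still using the full row pitch of `width` bytes.
  if (row % 2 == 0 && col % 2 == 0) {
    const int uv_index = num_pixels + (row / 2) * width + (col / 2) * 2;
    yuv[uv_index] = u;
    yuv[uv_index + 1] = v;
  }
}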
+__global__
+void gpuRawRgbToGammaCorrectedYuvNv12Impl(CUdeviceptr cu_image_in_ptr, int width, int height,
+    bool useSrgbColorSpace, CUdeviceptr cu_image_yuv_nv12_out_ptr) {
+  const int num_pixels = width * height;
+  const int num_threads_per_block = blockDim.x;
+  const int num_blocks_in_grid = gridDim.x;
+  const int total_num_of_threads = num_threads_per_block * num_blocks_in_grid;
+  const int num_pixels_to_process_in_thread = (num_pixels + total_num_of_threads) / total_num_of_threads;
+  const int start_i = blockIdx.x * num_threads_per_block * num_pixels_to_process_in_thread +
+    threadIdx.x * num_pixels_to_process_in_thread;
+  const int end_i = min(start_i + num_pixels_to_process_in_thread, num_pixels);
+  const PixelRgb<float> * cu_image_in_rgb_float_ptr = (PixelRgb<float>*)cu_image_in_ptr;
+  uint8_t* cu_image_yuv_nv12_out_uint_ptr = (uint8_t*)cu_image_yuv_nv12_out_ptr;
+  float r_f, g_f, b_f;
+  uint8_t r, g, b;
+  for(int i = start_i; i < end_i; ++i) {
+    // Gamma correction
+    const PixelRgb<float> & raw_pixel = cu_image_in_rgb_float_ptr[i];
+    if(useSrgbColorSpace) {
+      r_f = color_linear_to_srgb(raw_pixel.r);
+      g_f = color_linear_to_srgb(raw_pixel.g);
+      b_f = color_linear_to_srgb(raw_pixel.b);
+    } else {
+      r_f = raw_pixel.r;
+      g_f = raw_pixel.g;
+      b_f = raw_pixel.b;
+    }
+    r = (uint8_t)(__saturatef(r_f) * 255.0f + 0.5f);
+    g = (uint8_t)(__saturatef(g_f) * 255.0f + 0.5f);
+    b = (uint8_t)(__saturatef(b_f) * 255.0f + 0.5f);
+    // Convert sRGB to YUV
+    const int pixel_row = i/width;
+    const int num_pixels_above_the_current_row = pixel_row * width;
+    const int pixel_column = i - num_pixels_above_the_current_row;
+    const uint8_t y = (( 66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
+    cu_image_yuv_nv12_out_uint_ptr[i] = y;
+    if(pixel_column % 2 == 0 && pixel_row % 2 == 0) {
+      const uint8_t u = ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
+      const uint8_t v = ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
+      const int u_i = num_pixels + i - (pixel_row/2 * width);
+      cu_image_yuv_nv12_out_uint_ptr[u_i] = u;
+      cu_image_yuv_nv12_out_uint_ptr[u_i + 1] = v;
+    }
+  }
+}
+
+void gpuRawRgbToGammaCorrectedYuvNv12(CUdeviceptr cu_image_in_ptr, int width, int height,
+    bool useSrgbColorSpace, CUstream cuda_stream, CUdeviceptr cu_image_yuv_nv12_out_ptr) {
+  const int num_blocks = NUM_THREADS / NUM_THREADS_PER_BLOCK;
+  const size_t size_of_dynamically_allocated_shared_memory = 0;
+  gpuRawRgbToGammaCorrectedYuvNv12Impl<<<num_blocks, NUM_THREADS_PER_BLOCK,
+      size_of_dynamically_allocated_shared_memory, cuda_stream>>>(
+    cu_image_in_ptr, width, height, useSrgbColorSpace, cu_image_yuv_nv12_out_ptr);
+}
diff --git a/intern/cycles/cluster_rendering/libcluster_cuda_kernels/gpu_image_utils.h b/intern/cycles/cluster_rendering/libcluster_cuda_kernels/gpu_image_utils.h
new file mode 100644
index 00000000000..e1c1532e121
--- /dev/null
+++ b/intern/cycles/cluster_rendering/libcluster_cuda_kernels/gpu_image_utils.h
@@ -0,0 +1,7 @@
+#ifndef __IMAGE_UTILS_H__
+#define __IMAGE_UTILS_H__
+
+void gpuRawRgbToGammaCorrectedYuvNv12(CUdeviceptr cu_image_in_ptr, int width, int height,
+    bool useSrgbColorSpace, CUstream cuda_stream, CUdeviceptr cu_image_yuv_nv12_out_ptr);
+
+#endif
diff --git a/intern/cycles/cluster_rendering/profiling/get_metrics.py b/intern/cycles/cluster_rendering/profiling/get_metrics.py
index 9e2d6ada6e9..a2e9c2b6aa6 100755
--- a/intern/cycles/cluster_rendering/profiling/get_metrics.py
+++ b/intern/cycles/cluster_rendering/profiling/get_metrics.py
@@ -979,8 +979,8 @@ def get_resolution_stats(config):
                 print("Can not get resolution from the log line, it could be corrupted. 
Log line:\n" + log_line, file = sys.stderr) log_line = log_file.readline() continue - width = log_items[-3] - height = log_items[-2] + width = log_items[-2] + height = log_items[-1] resolution = width + "x" + height if current_resolution_stats is None or current_resolution_stats.name != resolution: current_resolution_stats = MetricStats(resolution) diff --git a/intern/cycles/cluster_rendering/profiling/init_server.py b/intern/cycles/cluster_rendering/profiling/init_server.py index ac016af5883..d29cce2b317 100644 --- a/intern/cycles/cluster_rendering/profiling/init_server.py +++ b/intern/cycles/cluster_rendering/profiling/init_server.py @@ -16,7 +16,7 @@ from pprint import pprint DEFAULT_NUM_GPU = 10 -SUPPORTED_DEVICE_TYPES = ["OPTIX", "CUDA"] +SUPPORTED_DEVICE_TYPES = ["OPTIX", "CUDA", "METAL"] class ProcUnitType(enum.Enum): CPU = "CPU" GPU = "GPU" diff --git a/intern/cycles/device/cuda/device_impl.cpp b/intern/cycles/device/cuda/device_impl.cpp index 63dd278c19e..7a3d9c58513 100644 --- a/intern/cycles/device/cuda/device_impl.cpp +++ b/intern/cycles/device/cuda/device_impl.cpp @@ -103,12 +103,16 @@ CUDADevice::CUDADevice(const DeviceInfo &info, Stats &stats, Profiler &profiler) } /* Create context. */ - result = cuCtxCreate(&cuContext, ctx_flags, cuDevice); + +// FRL_CGR BEGIN + // result = cuCtxCreate(&cuContext, ctx_flags, cuDevice); + result = cuDevicePrimaryCtxRetain(&cuContext, cuDevice); if (result != CUDA_SUCCESS) { set_error(string_printf("Failed to create CUDA context (%s)", cuewErrorString(result))); return; } +// FRL_CGR END int major, minor; cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId); @@ -116,14 +120,20 @@ CUDADevice::CUDADevice(const DeviceInfo &info, Stats &stats, Profiler &profiler) cuDevArchitecture = major * 100 + minor * 10; /* Pop context set by cuCtxCreate. 
*/ - cuCtxPopCurrent(NULL); +// FRL_CGR BEGIN +// with cuDevicePrimaryCtxRetain do not need to pop current +// cuCtxPopCurrent(NULL); +// FRL_CGR_END } CUDADevice::~CUDADevice() { texture_info.free(); - cuda_assert(cuCtxDestroy(cuContext)); +// FRL_CGR BEGIN + // do not destroy primary context + //cuda_assert(cuCtxDestroy(cuContext)); +// FRL_CGR END } bool CUDADevice::support_device(const uint /*kernel_features*/) diff --git a/intern/cycles/device/cuda/device_impl.h b/intern/cycles/device/cuda/device_impl.h index ea83125a82e..9044ac68f00 100644 --- a/intern/cycles/device/cuda/device_impl.h +++ b/intern/cycles/device/cuda/device_impl.h @@ -73,7 +73,7 @@ class CUDADevice : public GPUDevice { virtual void copy_host_to_device(void *device_pointer, void *host_pointer, size_t size, size_t offset) override; void host_mem_alloc(size_t size, int aligment, void **p_mem) override; void host_mem_free(void *p_mem) override; - + void mem_alloc(device_memory &mem) override; void mem_copy_to(device_memory &mem) override; diff --git a/intern/cycles/device/optix/device_impl.cpp b/intern/cycles/device/optix/device_impl.cpp index 2f2f87f9bd2..6a61322d806 100644 --- a/intern/cycles/device/optix/device_impl.cpp +++ b/intern/cycles/device/optix/device_impl.cpp @@ -963,7 +963,7 @@ bool OptiXDevice::build_optix_bvh(BVHOptiX *bvh, // static thread_mutex mutex; // thread_scoped_lock lock(mutex); - //const CUDAContextScope scope(this); + const CUDAContextScope scope(this); //const bool use_fast_trace_bvh = (bvh->params.bvh_type == BVH_TYPE_STATIC); // FRL_CGR diff --git a/intern/cycles/session/session_frl.cpp b/intern/cycles/session/session_frl.cpp index 8c1a33dff5a..8b2907fb274 100644 --- a/intern/cycles/session/session_frl.cpp +++ b/intern/cycles/session/session_frl.cpp @@ -6,6 +6,7 @@ CCL_NAMESPACE_BEGIN using namespace std::chrono; +using cgr_libcluster::ClusterSessionParams; ccl::float3 &toCyclesFloat3(cgr_libcluster::float3 &value); cgr_libcluster::float3 &fromCyclesFloat3(ccl::float3 &value); @@ -82,6 +83,7 @@ void Session::logRenderSessionPanelSettings() { " num servers: " << params.cluster_session_params.num_servers << std::endl << " compression quality: " << params.cluster_session_params.master_compression_quality << std::endl << " master_image_color_format: " << params.cluster_session_params.master_image_color_format << std::endl << + " master_image_compressor: " << params.cluster_session_params.master_image_compressor << std::endl << " master denoiser: " << params.cluster_session_params.master_denoiser; } @@ -139,7 +141,7 @@ void Session::modify_shader_object(const std::string & object_name, const std::s attribute_name << " value is not set"; } -//helpers used by servers session to set buffer params and scene camera according to +//helpers used by servers session to set buffer params and scene camera according to //NetCamera command static void setBufferParams(BufferParams &buffer_params_, cgr_libcluster::NetCamera& netCamera) @@ -242,9 +244,9 @@ void Session::resetNetCamera(cgr_libcluster::NetCamera& netCamera) VLOG(3) << "set net integrator seed: " << integrator->get_seed(); } } - + tile_manager_.reset_scheduling(buffer_params_, get_effective_tile_size()); - render_scheduler_.reset(buffer_params_, netCamera.sampleCount, 0); + render_scheduler_.reset(buffer_params_, netCamera.sampleCount, 0); tile_manager_.update(buffer_params_, scene); params.samples = netCamera.sampleCount; @@ -302,10 +304,11 @@ bool Session::server_wait_for_camera() } } -//-------Methods used by client and standalone session to 
create NetCamera objects, serialize and transport (client) them +//-------Methods used by client and standalone session to create NetCamera objects, serialize and transport (client) them //------------------------------------------------------------------------------------------------------------------------- //used by client session to initliaze a NetCamera for transport -static void initializeNetCamera(cgr_libcluster::NetCamera& netCamera, Camera &cam, int s, int f, int sceneFrame, int iseed, cgr_libcluster::ClusterSessionParams& csp) +static void initializeNetCamera(cgr_libcluster::NetCamera& netCamera, Camera &cam, int s, int f, int sceneFrame, int iseed, + cgr_libcluster::ClusterSessionParams& csp) { netCamera.cam_matrix = fromCyclesTransform(cam.get_matrix()); netCamera.cam_type = fromCyclesCameraType(cam.get_camera_type()); @@ -329,11 +332,29 @@ static void initializeNetCamera(cgr_libcluster::NetCamera& netCamera, Camera &ca netCamera.master_denoiser = csp.master_denoiser; netCamera.master_image_color_format = csp.master_image_color_format; + netCamera.master_image_compressor = csp.master_image_compressor; VLOG(3) << "Constructed net camera: " << netCamera.cam_width << " " << netCamera.cam_height; } void Session::client_send_camera(int samples) { +#ifdef WITH_CUDA + if (theClient && + params.cluster_session_params.master_image_compressor == ClusterSessionParams::MASTER_IMAGE_COMPRESSOR_NVENCODER && + (scene->camera->get_full_width() % 2 || scene->camera->get_full_height() % 2)) { + std::string message = "NVENCODER compressor is requested for images with odd dimension: " + + std::to_string(scene->camera->get_full_width()) + "x" + std::to_string(scene->camera->get_full_height()) + + " Current implementation of NVENCODER only supports images with even dimensions.\ + To use NVENCODER compressor please resize image so it has even dimension like 1280x720"; + throw std::runtime_error(message); + } +#else + if(params.cluster_session_params.master_image_compressor == ClusterSessionParams::MASTER_IMAGE_COMPRESSOR_NVENCODER) { + throw std::runtime_error("ERROR. NVENCODER compressor is requested. Client is compiled without CUDA support\ + so has no nvencoder and will not be able to decode received images which are nvencoded.\ + Recompile with CUDA or use JPEG compressor instead. 
Terminating."); + } +#endif //TODO Modify object msg need to be handled properly if (theClient) { cgr_libcluster::ModifyObjectParams & modify_object_params = params.cluster_session_params.modify_object_params; @@ -349,7 +370,6 @@ void Session::client_send_camera(int samples) int scene_frame = scene->getCurrentFrame(); initializeNetCamera(netCamera, *(scene->camera), samples, frame_count, scene_frame, iseed, params.cluster_session_params); if (theClient) { - //client send NetCamera object to master theClient->send_camera(netCamera); @@ -380,9 +400,9 @@ void Session::client_set_modify_object_message(std::string & object_name, std::s bool Session::ready_to_reset() { bool ready_to_reset = path_trace_->ready_to_reset(); - - // - //this logic a bit complicated but it basically tracks + + // + //this logic a bit complicated but it basically tracks //timeout as between the last "not" ready_to_reset // if (ready_to_reset) { diff --git a/source/blender/makesdna/CMakeLists.txt b/source/blender/makesdna/CMakeLists.txt index 1a48b6e4928..6e2eb2ec158 100644 --- a/source/blender/makesdna/CMakeLists.txt +++ b/source/blender/makesdna/CMakeLists.txt @@ -5,4 +5,16 @@ if(WITH_FREESTYLE) add_definitions(-DWITH_FREESTYLE) endif() +#FRL_CLR_BEGIN + +if(WITH_CYCLES_DEVICE_CUDA) + add_definitions(-DWITH_CUDA) +endif() + +if(WITH_CYCLES_DEVICE_OPTIX) + add_definitions(-DWITH_OPTIX) +endif() + +#FRL_CLR_END + add_subdirectory(intern) diff --git a/source/blender/makesdna/DNA_camera_types.h b/source/blender/makesdna/DNA_camera_types.h index 10a6c936be1..26d6cd56166 100644 --- a/source/blender/makesdna/DNA_camera_types.h +++ b/source/blender/makesdna/DNA_camera_types.h @@ -146,6 +146,7 @@ enum { CAM_SHOWSENSOR = (1 << 8), CAM_SHOW_SAFE_CENTER = (1 << 9), CAM_SHOW_BG_IMAGE = (1 << 10), + CAM_SHOW_KINECT_AZURE = (1 << 11), // Custom FB }; /* Sensor fit */ diff --git a/source/blender/makesdna/DNA_node_types.h b/source/blender/makesdna/DNA_node_types.h index 9e2d4347b15..251645e9ba9 100644 --- a/source/blender/makesdna/DNA_node_types.h +++ b/source/blender/makesdna/DNA_node_types.h @@ -1657,6 +1657,7 @@ typedef struct NodeShaderMix { #define SHD_GLOSSY_GGX 2 #define SHD_GLOSSY_ASHIKHMIN_SHIRLEY 3 #define SHD_GLOSSY_MULTI_GGX 4 +#define SHD_GLOSSY_GGX_FRESNEL_REFRACTION 5 /* vector transform */ #define SHD_VECT_TRANSFORM_TYPE_VECTOR 0 diff --git a/source/blender/makesdna/DNA_scene_types.h b/source/blender/makesdna/DNA_scene_types.h index 93184d77c90..a864ed39a7b 100644 --- a/source/blender/makesdna/DNA_scene_types.h +++ b/source/blender/makesdna/DNA_scene_types.h @@ -850,7 +850,8 @@ typedef struct RenderData { int schedule_modify_object_message; int device_scale_factor; int master_image_color_format; - char _pad_frl_fields[4]; // add padding if needed to satify aligning check (by 8 bytes) + int master_image_compressor; + //char _pad_frl_fields[4]; // add padding if needed to satify aligning check (by 8 bytes) /* WebRTC related */ int use_webrtc; @@ -898,7 +899,9 @@ typedef enum { typedef enum { MASTER_DENOISER_NONE = 0, MASTER_DENOISER_OIDN = 1, +#ifdef WITH_OPTIX MASTER_DENOISER_OPTIX = 2, +#endif //WITH_OPTIX MASTER_DENOISER_BARCELONA = 3, } eMasterDenoiser; @@ -908,6 +911,14 @@ typedef enum { MASTER_IMAGE_COLOR_FORMAT_SRGB = 2, } eMasterImageColorFormat; +/* RenderData.master_image_compressor */ +typedef enum { + MASTER_IMAGE_COMPRESSOR_JPEG = 1, +#ifdef WITH_CUDA + MASTER_IMAGE_COMPRESSOR_NVENCODER = 2, +#endif //WITH_CUDA +} eMasterImageCompressor; + /* RenderData.peer_connection_protocol */ typedef enum { 
 /* RenderData.peer_connection_protocol */
 typedef enum {
   PEER_CONNECTION_PROTOCOL_ANY = 0,
diff --git a/source/blender/makesdna/intern/CMakeLists.txt b/source/blender/makesdna/intern/CMakeLists.txt
index 944aec1f8bd..fc1c56be34b 100644
--- a/source/blender/makesdna/intern/CMakeLists.txt
+++ b/source/blender/makesdna/intern/CMakeLists.txt
@@ -20,6 +20,17 @@ set(LIB
 )
 
 add_definitions(-DWITH_DNA_GHASH)
+#FRL_CLR_BEGIN
+
+#if(WITH_CYCLES_DEVICE_CUDA)
+#  add_definitions(-DWITH_CUDA)
+#endif()
+
+if(WITH_CYCLES_DEVICE_OPTIX)
+  add_definitions(-DWITH_OPTIX)
+endif()
+
+#FRL_CLR_END
 
 # Needed for `mallocn.c`.
 if(HAVE_MALLOC_STATS_H)
diff --git a/source/blender/makesrna/intern/CMakeLists.txt b/source/blender/makesrna/intern/CMakeLists.txt
index 98c2b2e53fd..9467e6e15d5 100644
--- a/source/blender/makesrna/intern/CMakeLists.txt
+++ b/source/blender/makesrna/intern/CMakeLists.txt
@@ -241,6 +241,18 @@ if(WITH_PYTHON)
   )
 endif()
 
+#FRL_CLR_BEGIN
+
+  if(WITH_CYCLES_DEVICE_CUDA)
+    add_definitions(-DWITH_CUDA)
+  endif()
+
+  if(WITH_CYCLES_DEVICE_OPTIX)
+    add_definitions(-DWITH_OPTIX)
+  endif()
+
+#FRL_CLR_END
+
 if(WITH_IMAGE_OPENEXR)
   add_definitions(-DWITH_OPENEXR)
 endif()
diff --git a/source/blender/makesrna/intern/rna_scene.c b/source/blender/makesrna/intern/rna_scene.c
index b3e0f2d29e0..a3f099db897 100644
--- a/source/blender/makesrna/intern/rna_scene.c
+++ b/source/blender/makesrna/intern/rna_scene.c
@@ -6282,12 +6282,14 @@ static void rna_def_scene_render_data(BlenderRNA *brna)
        "OIDN",
        0,
        "OpenImageDenoise",
-       "Master denoises images with Intel Open Image Denoise"},
-      {MASTER_DENOISER_OPTIX,
+       "Master denoises images with Intel Open Image Denoise"},
+#ifdef WITH_OPTIX
+      {MASTER_DENOISER_OPTIX,
        "OPTIX",
        0,
        "OptiX",
        "Master denoises images with NVIDIA OptiX AI-Accelerated denoiser"},
+#endif
       {MASTER_DENOISER_BARCELONA,
        "BARCELONA",
        0,
@@ -6310,6 +6312,22 @@ static void rna_def_scene_render_data(BlenderRNA *brna)
       {0, NULL, 0, NULL, NULL},
   };
 
+  static const EnumPropertyItem master_image_compressor_items[] = {
+      {MASTER_IMAGE_COMPRESSOR_JPEG,
+       "JPEG",
+       0,
+       "JPEG",
+       "Master compresses images with JPEG"},
+#ifdef WITH_CUDA
+      {MASTER_IMAGE_COMPRESSOR_NVENCODER,
+       "NVENCODER",
+       0,
+       "NVENCODER",
+       "Master compresses images with NVENCODER"},
+#endif //WITH_CUDA
+      {0, NULL, 0, NULL, NULL},
+  };
+
   static const EnumPropertyItem render_session_mode_items[] = {
       {RENDER_SESSION_MODE_STANDALONE,
        "STANDALONE",
@@ -6455,6 +6473,12 @@ static void rna_def_scene_render_data(BlenderRNA *brna)
   RNA_def_property_ui_text(prop, "Color format", "");
   RNA_def_property_update(prop, NC_SCENE | ND_RENDER_OPTIONS, NULL);
 
+  prop = RNA_def_property(srna, "master_image_compressor", PROP_ENUM, PROP_NONE);
+  RNA_def_property_enum_items(prop, master_image_compressor_items);
+  RNA_def_property_clear_flag(prop, PROP_ANIMATABLE);
+  RNA_def_property_ui_text(prop, "Image compressor", "");
+  RNA_def_property_update(prop, NC_SCENE | ND_RENDER_OPTIONS, NULL);
+
   // Modify object name section
   prop = RNA_def_property(srna, "modify_object_name", PROP_STRING, PROP_NONE);
   RNA_def_property_clear_flag(prop, PROP_ANIMATABLE);
diff --git a/source/creator/CMakeLists.txt b/source/creator/CMakeLists.txt
index 7295556b5e2..13fdae462ba 100644
--- a/source/creator/CMakeLists.txt
+++ b/source/creator/CMakeLists.txt
@@ -111,6 +111,18 @@ if(WITH_OPENCOLORIO)
   add_definitions(-DWITH_OCIO)
 endif()
 
+#FRL_CLR_BEGIN
+
+if(WITH_CYCLES_DEVICE_CUDA)
+  add_definitions(-DWITH_CUDA)
+endif()
+
+if(WITH_CYCLES_DEVICE_OPTIX)
+  add_definitions(-DWITH_OPTIX)
+endif()
+
+#FRL_CLR_END
+
 # Setup the EXE sources and `buildinfo`.
 set(SRC
   creator.c
diff --git a/source/creator/creator_args.c b/source/creator/creator_args.c
index 5ffd78a1518..fb4376acef3 100644
--- a/source/creator/creator_args.c
+++ b/source/creator/creator_args.c
@@ -2020,9 +2020,13 @@ static int arg_handle_master_denoiser(int argc, const char **argv, void *data)
       const char *rmtype = argv[1];
       if (BLI_strcasecmp(rmtype, "OIDN") == 0) {
         scene->r.master_denoiser = MASTER_DENOISER_OIDN;
-      } else if (BLI_strcasecmp(rmtype, "OPTIX") == 0) {
+      }
+#ifdef WITH_OPTIX
+      else if (BLI_strcasecmp(rmtype, "OPTIX") == 0) {
         scene->r.master_denoiser = MASTER_DENOISER_OPTIX;
-      } else if (BLI_strcasecmp(rmtype, "BARCELONA") == 0) {
+      }
+#endif
+      else if (BLI_strcasecmp(rmtype, "BARCELONA") == 0) {
         scene->r.master_denoiser = MASTER_DENOISER_BARCELONA;
       } else {
         printf("\nError: Unknown master denoiser %s (--master-denoiser or or ).\n", rmtype);
@@ -2064,11 +2068,48 @@ static int arg_handle_master_image_color_format(int argc, const char **argv, voi
       else {
         printf(
             "\nError: no blend loaded. "
-            "order the arguments so '--master_image_color_format' is after the blend is loaded.\n");
+            "order the arguments so '--master-image-color-format' is after the blend is loaded.\n");
         return 0;
       }
 }
 
+static const char arg_handle_master_image_compressor_doc[] =
+    "\n"
+    "\tCompressor that the master uses to compress images before sending them to a client.\n"
+    "\tSupported values: JPEG, NVENCODER\n";
+
+static int arg_handle_master_image_compressor(int argc, const char **argv, void *data)
+{
+  bContext *C = data;
+  Scene *scene = CTX_data_scene(C);
+
+  if (scene) {
+    scene->r.master_image_compressor = MASTER_IMAGE_COMPRESSOR_JPEG;
+    if (argc > 1) {
+      const char *rmtype = argv[1];
+      if (BLI_strcasecmp(rmtype, "JPEG") == 0) {
+        scene->r.master_image_compressor = MASTER_IMAGE_COMPRESSOR_JPEG;
+      }
+#ifdef WITH_CUDA
+      else if (BLI_strcasecmp(rmtype, "NVENCODER") == 0) {
+        scene->r.master_image_compressor = MASTER_IMAGE_COMPRESSOR_NVENCODER;
+      }
+#endif //WITH_CUDA
+      else {
+        printf("\nError: Unknown compressor %s (--master-image-compressor JPEG or NVENCODER).\n", rmtype);
+      }
+    }
+    return 1;
+  }
+  else {
+    printf(
+        "\nError: no blend loaded. "
+        "order the arguments so '--master-image-compressor' is after the blend is loaded.\n");
+    return 0;
+  }
+}
+
+
 #ifdef WITH_WEBRTC
 static int arg_handle_webrtc_int_param(int argc, const char **argv, const char *arg_id,
                                        int *param, int default_param_value)
@@ -3049,6 +3090,7 @@ void main_args_setup(bContext *C, bArgs *ba)
   BLI_args_add(ba, NULL, "--save_denoise_io", CB(arg_handle_save_denoise_io), C);
   BLI_args_add(ba, NULL, "--save_cameras", CB(arg_handle_save_cameras), C);
   BLI_args_add(ba, NULL, "--master-image-color-format", CB(arg_handle_master_image_color_format), C);
+  BLI_args_add(ba, NULL, "--master-image-compressor", CB(arg_handle_master_image_compressor), C);
 #ifdef WITH_WEBRTC
   BLI_args_add(ba, NULL, "--use-webrtc", CB(arg_handle_use_webrtc), C);
   BLI_args_add(ba, NULL, "--signaling-server-address", CB(arg_handle_signaling_server_address), C);
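[Reviewer note] arg_handle_master_image_compressor repeats the strcasecmp ladder used by the other handlers. A table-driven variant is sketched below; parse_compressor, kCompressorTable, and the use of POSIX strcasecmp in place of BLI_strcasecmp are assumptions made so the example is self-contained, not part of this patch.

    // Sketch only: table-driven parsing as an alternative to the if/else ladder.
    // The enum values mirror eMasterImageCompressor; everything else is hypothetical.
    #include <cstdio>
    #include <strings.h>  // strcasecmp (POSIX); Blender proper would use BLI_strcasecmp

    enum MasterImageCompressor { COMPRESSOR_JPEG = 1, COMPRESSOR_NVENCODER = 2 };

    struct CompressorEntry {
      const char *name;
      MasterImageCompressor value;
    };

    static const CompressorEntry kCompressorTable[] = {
        {"JPEG", COMPRESSOR_JPEG},
    #ifdef WITH_CUDA
        {"NVENCODER", COMPRESSOR_NVENCODER},
    #endif
    };

    // Returns true on a match and writes *out; otherwise falls back to JPEG.
    static bool parse_compressor(const char *arg, MasterImageCompressor *out)
    {
      for (const CompressorEntry &entry : kCompressorTable) {
        if (strcasecmp(arg, entry.name) == 0) {
          *out = entry.value;
          return true;
        }
      }
      *out = COMPRESSOR_JPEG;
      return false;
    }

    int main(int argc, char **argv)
    {
      MasterImageCompressor compressor = COMPRESSOR_JPEG;
      if (argc > 1 && !parse_compressor(argv[1], &compressor)) {
        fprintf(stderr, "Unknown compressor %s, falling back to JPEG\n", argv[1]);
      }
      printf("compressor id: %d\n", (int)compressor);
      return 0;
    }

With a table, adding a compressor touches one line next to its #ifdef guard, and the error message can be generated from the table instead of being maintained by hand.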