GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Unknown custom option \"%s\" with value \"%s\" in %s\n", tok, tokval, options));
tok = strtok(NULL, "=");
}
- free(opt);
+ gf_free(opt);
}
int dc_video_encoder_open(VideoOutputFile *video_output_file, VideoDataConf *video_data_conf, Bool use_source_timing)
#include <gpac/scene_engine.h>
#endif
+#ifndef GPAC_DISABLE_TTXT
+#include <gpac/webvtt.h>
+#endif
+
#ifdef GPAC_DISABLE_ISOM
#error "Cannot compile MP42TS if GPAC is not built with ISO File Format support"
#endif
-#define DEFAULT_PCR_OFFSET 18000
+#define DEFAULT_PCR_OFFSET 0
#define UDP_BUFFER_SIZE 0x40000
#define MP42TS_VIDEO_FREQ 1000 /*meant to send AVC IDR only every CLOCK_REFRESH ms*/
u32 temi_url_insertion_delay = 1000;
+FILE *logfile = NULL;
+
+static void on_gpac_log(void *cbk, u32 ll, u32 lm, const char *fmt, va_list list)
+{
+ FILE *logs = cbk;
+ vfprintf(logs, fmt, list);
+ fflush(logs);
+}
-static GFINLINE void usage(const char * progname)
+static GFINLINE void usage()
{
- fprintf(stderr, "USAGE: %s -rate=R [[-prog=prog1]..[-prog=progn]] [-audio=url] [-video=url] [-mpeg4-carousel=n] [-mpeg4] [-time=n] [-src=file] DST [[DST]]\n"
+ fprintf(stderr, "GPAC version " GPAC_FULL_VERSION "\n"
+ "GPAC Copyright (c) Telecom ParisTech 2000-2012\n"
+ "GPAC Configuration: " GPAC_CONFIGURATION "\n"
+ "Features: %s\n\n", gpac_features());
+ fprintf(stderr, "mp2ts <inputs> <destinations> [options]\n"
"\n"
+ "Inputs:\n"
+ "-prog filename specifies an input file used for a TS service\n"
+ " * currently only supports ISO files and SDP files\n"
+ " * can be used several times, once for each program\n"
+ "\n"
+ "Destinations:\n"
+ "Several destinations may be specified as follows, at least one is mandatory\n"
+ "-dst-udp UDP_address:port (multicast or unicast)\n"
+ "-dst-rtp RTP_address:port\n"
+ "-dst-file filename\n"
+ "The following parameters may be specified when -dst-file is used\n"
+ "-segment-dir dir server local directory to store segments (ends with a '/')\n"
+ "-segment-duration dur segment duration in seconds\n"
+ "-segment-manifest file m3u8 file basename\n"
+ "-segment-http-prefix p client address for accessing server segments\n"
+ "-segment-number n number of segments to list in the manifest\n"
+ "\n"
+ "Basic options:\n"
+ "-rate R specifies target rate in kbps of the multiplex (optional)\n"
+ "-real-time specifies the muxer will work in real-time mode\n"
+ " * if not specified, the muxer will generate the TS as quickly as possible\n"
+ " * automatically set for SDP or BT input\n"
+ "-pcr-init V sets initial value V for PCR - if not set, random value is used\n"
+ "-pcr-offset V offsets all timestamps from PCR by V, in 90kHz. Default value: %d\n"
+ "-psi-rate V sets PSI refresh rate V in ms (default 100ms).\n"
+ " * If 0, PSI data is only sent once at the beginning or before each IDR when -rap option is set.\n"
+ " * This should be set to 0 for DASH streams.\n"
+ "-time n request the muxer to stop after n ms\n"
+ "-single-au forces 1 PES = 1 AU (disabled by default)\n"
+ "-rap forces RAP/IDR to be aligned with PES start for video streams (disabled by default)\n"
+ " in this mode, PAT, PMT and PCR will be inserted before the first TS packet of the RAP PES\n"
+ "-flush-rap same as -rap but flushes all other streams (sends remaining PES packets) before inserting PAT/PMT\n"
+ "-nb-pack N specifies to pack N TS packets together before sending on network or writing to file\n"
+ "-ttl N specifies Time-To-Live for multicast. Default is 1.\n"
+ "-ifce IPIFCE specifies default IP interface to use. Default is IF_ANY.\n"
+ "-temi [URL] Inserts TEMI time codes in adaptation field. URL is optional\n"
+ "-temi-delay DelayMS Specifies delay between two TEMI url descriptors\n"
+ "\n"
+ "MPEG-4/T-DMB options:\n"
+ "-src filename update file: must be either an .sdp or a .bt file\n"
+ "-audio url may be mp3/udp or aac/http (shoutcast/icecast)\n"
+ "-video url shall be a raw h264 frame\n"
+ "-mpeg4-carousel n carousel period in ms\n"
+ "-mpeg4 or -4on2 forces usage of MPEG-4 signaling (IOD and SL Config)\n"
+ "-4over2 same as -4on2 and uses PMT to carry OD Updates\n"
+ "-bifs-pes carries BIFS over PES instead of sections\n"
+ "-bifs-pes-ex carries BIFS over PES without writing timestamps in SL\n"
+ "\n"
+ "Misc options\n"
#ifdef GPAC_MEMORY_TRACKING
- "\t-mem-track: enables memory tracker\n"
+ "-mem-track enables memory tracker\n"
#endif
- "\t-rate=R specifies target rate in kbps of the multiplex (mandatory)\n"
- "\t-real-time specifies the muxer will work in real-time mode\n"
- "\t * automatically set for SDP or BT input\n"
- "\t-pcr-init=V sets initial value V for PCR - if not set, random value is used\n"
- "\t-pcr-offset=V offsets all timestamps from PCR by V, in 90kHz. Default value: %d\n"
- "\t-psi-rate=V sets PSI refresh rate V in ms (default 100ms). If 0, PSI data is only send once at the begining\n"
- " or before each IDR when -rap option is set. This should be set to 0 for DASH streams.\n"
- "\t-time=n request the program to stop after n ms\n"
- "\t-single-au forces 1 PES = 1 AU (disabled by default)\n"
- "\t-rap forces RAP/IDR to be aligned with PES start for video streams (disabled by default)\n"
- " in this mode, PAT, PMT and PCR will be inserted before the first TS packet of the RAP PES\n"
- "\t-flush-rap same as -rap but flushes all other streams (sends remaining PES packets) before inserting PAT/PMT\n"
- "\t-prog=filename specifies an input file used for a TS service\n"
- "\t * currently only supports ISO files and SDP files\n"
- "\t * can be used several times, once for each program\n"
- "\t-nb-pack=N specifies to pack N TS packets together before sending on network or writing to file\n"
- "\t-ttl=N specifies Time-To-Live for multicast. Default is 1.\n"
- "\t-ifce=IPIFCE specifies default IP interface to use. Default is IF_ANY.\n"
- "\t-temi[=URL] Inserts TEMI time codes in adaptation field. URL is optionnal\n"
- "\t-temi-delay=DelayMS Specifies delay between two TEMI url descriptors\n"
-
- "\tDST : Destinations, at least one is mandatory\n"
- "\t -dst-udp UDP_address:port (multicast or unicast)\n"
- "\t -dst-rtp RTP_address:port\n"
- "\t -dst-file Supports the following arguments:\n"
- "\t -segment-dir=dir server local directory to store segments\n"
- "\t -segment-duration=dur segment duration in seconds\n"
- "\t -segment-manifest=file m3u8 file basename\n"
- "\t -segment-http-prefix=p client address for accessing server segments\n"
- "\t -segment-number=n only n segments are used using a cyclic pattern\n"
- "\t\n"
- "\tMPEG-4 options\n"
- "\t-mpeg4-carousel=n carousel period in ms\n"
- "\t-mpeg4 or -4on2 forces usage of MPEG-4 signaling (IOD and SL Config)\n"
- "\t-4over2 same as -4on2 and uses PMT to carry OD Updates\n"
- "\t-bifs-pes carries BIFS over PES instead of sections\n"
- "\t-bifs-pes-ex carries BIFS over PES without writing timestamps in SL\n"
- "\tMisc options\n"
- "\t-audio=url may be mp3/udp or aac/http (shoutcast/icecast)\n"
- "\t-video=url shall be a raw h264 frame\n"
- "\t-src=filename update file: must be either an .sdp or a .bt file\n\n"
- "\t\n"
- "\t-logs set log tools and levels, formatted as a ':'-separated list of toolX[:toolZ]@levelX\n"
- "\t-h or -help print this screen\n"
- "\n", progname, DEFAULT_PCR_OFFSET
+ "-logs set log tools and levels, formatted as a ':'-separated list of toolX[:toolZ]@levelX\n"
+ "-h or -help print this screen\n"
+ "\n", DEFAULT_PCR_OFFSET
);
}
{
GF_ISOFile *mp4;
u32 track, sample_number, sample_count;
+ u32 mstype, mtype;
GF_ISOSample *sample;
/*refresh rate for images*/
u32 image_repeat_ms, nb_repeat_last;
if (timescale || ntp) {
len = 3; //3 bytes flags
- if (timescale) len += 4 + (timecode > 0xFFFFFFFFUL) ? 8 : 4;
+ if (timescale) len += 4 + ((timecode > 0xFFFFFFFFUL) ? 8 : 4);
if (ntp) len += 8;
//write timeline descriptor
case GF_ESI_INPUT_DATA_FLUSH:
{
GF_ESIPacket pck;
+#ifndef GPAC_DISABLE_TTXT
+ GF_List *cues = NULL;
+#endif
if (!priv->sample)
priv->sample = gf_isom_get_sample(priv->mp4, priv->track, priv->sample_number+1, NULL);
pck.flags |= GF_ESI_DATA_AU_END;
pck.data = priv->sample->data;
pck.data_len = priv->sample->dataLength;
+ pck.duration = gf_isom_get_sample_duration(priv->mp4, priv->track, priv->sample_number+1);
+#ifndef GPAC_DISABLE_TTXT
+ if (priv->mtype==GF_ISOM_MEDIA_TEXT && priv->mstype==GF_ISOM_SUBTYPE_WVTT) {
+ u64 start;
+ GF_WebVTTCue *cue;
+ GF_List *gf_webvtt_parse_iso_cues(GF_ISOSample *iso_sample, u64 start);
+ start = (priv->sample->DTS * 1000) / ifce->timescale;
+ cues = gf_webvtt_parse_iso_cues(priv->sample, start);
+ if (gf_list_count(cues)>1) {
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS Muxer] More than one cue in sample\n"));
+ }
+ cue = (GF_WebVTTCue *)gf_list_get(cues, 0);
+ if (cue) {
+ pck.data = cue->text;
+ pck.data_len = (u32)strlen(cue->text)+1;
+ } else {
+ pck.data = NULL;
+ pck.data_len = 0;
+ }
+ }
+#endif
ifce->output_ctrl(ifce, GF_ESI_OUTPUT_DATA_DISPATCH, &pck);
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS Muxer] Track %d: sample %d CTS %d\n", priv->track, priv->sample_number+1, pck.cts));
+#ifndef GPAC_DISABLE_TTXT
+ if (cues) {
+ while (gf_list_count(cues)) {
+ GF_WebVTTCue *cue = (GF_WebVTTCue *)gf_list_get(cues, 0);
+ gf_list_rem(cues, 0);
+ gf_webvtt_cue_del(cue);
+ }
+ gf_list_del(cues);
+ cues = NULL;
+ }
+#endif
gf_isom_sample_del(&priv->sample);
priv->sample_number++;
priv->mp4 = mp4;
priv->track = track_num;
+ priv->mtype = gf_isom_get_media_type(priv->mp4, priv->track);
+ priv->mstype = gf_isom_get_media_subtype(priv->mp4, priv->track, 1);
priv->loop = prog->real_time ? 1 : 0;
priv->sample_count = gf_isom_get_sample_count(mp4, track_num);
prog->samples_count += priv->sample_count;
case GPAC_OTI_AUDIO_AAC_MPEG2_LCP:
case GPAC_OTI_AUDIO_AAC_MPEG2_SSRP:
case GPAC_OTI_VIDEO_MPEG4_PART2:
- ifce->decoder_config = gf_malloc(sizeof(char)*esd->decoderConfig->decoderSpecificInfo->dataLength);
+ ifce->decoder_config = (char *)gf_malloc(sizeof(char)*esd->decoderConfig->decoderSpecificInfo->dataLength);
ifce->decoder_config_size = esd->decoderConfig->decoderSpecificInfo->dataLength;
memcpy(ifce->decoder_config, esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength);
break;
case GPAC_OTI_VIDEO_SVC:
gf_isom_set_nalu_extract_mode(mp4, track_num, GF_ISOM_NALU_EXTRACT_LAYER_ONLY | GF_ISOM_NALU_EXTRACT_INBAND_PS_FLAG | GF_ISOM_NALU_EXTRACT_ANNEXB_FLAG | GF_ISOM_NALU_EXTRACT_VDRD_FLAG);
break;
+ case GPAC_OTI_SCENE_VTT_MP4:
+ ifce->decoder_config = (char *)gf_malloc(sizeof(char)*esd->decoderConfig->decoderSpecificInfo->dataLength);
+ ifce->decoder_config_size = esd->decoderConfig->decoderSpecificInfo->dataLength;
+ memcpy(ifce->decoder_config, esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength);
+ break;
}
}
gf_odf_desc_del((GF_Descriptor *)esd);
}
}
+/*macro to keep retro compatibility with '=' and spaces in parse_args*/
+#define CHECK_PARAM(param) (!strnicmp(arg, param, strlen(param)) \
+ && ( ((arg[strlen(param)] == '=') && (next_arg = arg+strlen(param)+1)) \
+ || ((strlen(arg) == strlen(param)) && ++i && (i<argc) && (next_arg = argv[i]))))
+
/*parse MP42TS arguments*/
-static GFINLINE GF_Err parse_args(int argc, char **argv, u32 *mux_rate, u32 *carrousel_rate, u64 *pcr_init_val, u32 *pcr_offset, u32 *psi_refresh_rate, Bool *single_au_pes, u32 *bifs_use_pes,
+static GFINLINE GF_Err parse_args(int argc, char **argv, u32 *mux_rate, u32 *carrousel_rate, s64 *pcr_init_val, u32 *pcr_offset, u32 *psi_refresh_rate, Bool *single_au_pes, u32 *bifs_use_pes,
M2TSProgram *progs, u32 *nb_progs, char **src_name,
Bool *real_time, u32 *run_time, char **video_buffer, u32 *video_buffer_size,
u32 *audio_input_type, char **audio_input_ip, u16 *audio_input_port,
char** segment_dir, u32 *segment_duration, char **segment_manifest, u32 *segment_number, char **segment_http_prefix, u32 *split_rap, u32 *nb_pck_pack, u32 *ttl, const char **ip_ifce, const char **temi_url)
{
Bool rate_found=0, mpeg4_carousel_found=0, time_found=0, src_found=0, dst_found=0, audio_input_found=0, video_input_found=0,
- seg_dur_found=0, seg_dir_found=0, seg_manifest_found=0, seg_number_found=0, seg_http_found = 0, real_time_found=0;
- char *prog_name, *arg = NULL, *error_msg = "no argument found";
+ seg_dur_found=0, seg_dir_found=0, seg_manifest_found=0, seg_number_found=0, seg_http_found=0, real_time_found=0;
+ char *prog_name, *arg = NULL, *next_arg = NULL, *error_msg = "no argument found";
u32 mpeg4_signaling = GF_M2TS_MPEG4_SIGNALING_NONE;
Bool force_real_time = 0;
s32 i;
for (i=1; i<argc; i++) {
arg = argv[i];
if (!stricmp(arg, "-h") || strstr(arg, "-help")) {
- usage(argv[0]);
+ usage();
return GF_EOS;
}
- else if (!strnicmp(arg, "-pcr-init=", 10)) {
- sscanf(arg, "-pcr-init="LLD, pcr_init_val);
+ else if (CHECK_PARAM("-pcr-init")) {
+ sscanf(next_arg, LLD, pcr_init_val);
}
- else if (!strnicmp(arg, "-pcr-offset=", 12)) {
- *pcr_offset = atoi(arg+12);
+ else if (CHECK_PARAM("-pcr-offset")) {
+ *pcr_offset = atoi(next_arg);
}
- else if (!strnicmp(arg, "-video=", 7)) {
+ else if (CHECK_PARAM("-video")) {
FILE *f;
if (video_input_found) {
error_msg = "multiple '-video' found";
goto error;
}
video_input_found = 1;
- arg+=7;
- f = fopen(arg, "rb");
+ f = fopen(next_arg, "rb");
if (!f) {
error_msg = "video file not found: ";
goto error;
fprintf(stderr, "Error while reading video file, has readen %u chars instead of %u.\n", read, *video_buffer_size);
}
fclose(f);
- } else if (!strnicmp(arg, "-audio=", 7)) {
+ } else if (CHECK_PARAM("-audio")) {
if (audio_input_found) {
error_msg = "multiple '-audio' found";
arg = NULL;
goto error;
}
audio_input_found = 1;
- arg+=7;
- if (!strnicmp(arg, "udp://", 6) || !strnicmp(arg, "rtp://", 6) || !strnicmp(arg, "http://", 7)) {
+ if (!strnicmp(next_arg, "udp://", 6) || !strnicmp(next_arg, "rtp://", 6) || !strnicmp(next_arg, "http://", 7)) {
char *sep;
/*set audio input type*/
- if (!strnicmp(arg, "udp://", 6))
+ if (!strnicmp(next_arg, "udp://", 6))
*audio_input_type = GF_MP42TS_UDP;
- else if (!strnicmp(arg, "rtp://", 6))
+ else if (!strnicmp(next_arg, "rtp://", 6))
*audio_input_type = GF_MP42TS_RTP;
#ifndef GPAC_DISABLE_PLAYER
- else if (!strnicmp(arg, "http://", 7))
+ else if (!strnicmp(next_arg, "http://", 7))
*audio_input_type = GF_MP42TS_HTTP;
#endif
/*http needs to get the complete URL*/
switch(*audio_input_type) {
case GF_MP42TS_UDP:
case GF_MP42TS_RTP:
- sep = strchr(arg+6, ':');
+ sep = strchr(next_arg+6, ':');
*real_time=1;
if (sep) {
*audio_input_port = atoi(sep+1);
sep[0]=0;
- *audio_input_ip = gf_strdup(arg+6);
+ *audio_input_ip = gf_strdup(next_arg+6);
sep[0]=':';
} else {
- *audio_input_ip = gf_strdup(arg+6);
+ *audio_input_ip = gf_strdup(next_arg+6);
}
break;
#ifndef GPAC_DISABLE_PLAYER
case GF_MP42TS_HTTP:
/* No need to dup since it may come from argv */
- *audio_input_ip = arg;
+ *audio_input_ip = next_arg;
assert(audio_input_port != 0);
break;
#endif
assert(0);
}
}
- } else if (!strnicmp(arg, "-psi-rate=", 10) ) {
- *psi_refresh_rate = atoi(arg+10);
- } else if (!stricmp(arg, "-bifs-pes") ) {
+ } else if (CHECK_PARAM("-psi-rate")) {
+ *psi_refresh_rate = atoi(next_arg);
+ } else if (!stricmp(arg, "-bifs-pes")) {
*bifs_use_pes = 1;
- } else if (!stricmp(arg, "-bifs-pes-ex") ) {
+ } else if (!stricmp(arg, "-bifs-pes-ex")) {
*bifs_use_pes = 2;
} else if (!stricmp(arg, "-mpeg4") || !stricmp(arg, "-4on2")) {
mpeg4_signaling = GF_M2TS_MPEG4_SIGNALING_FULL;
#else
fprintf(stderr, "WARNING - GPAC not compiled with Memory Tracker - ignoring \"-mem-track\"\n");
#endif
- } else if (!strnicmp(arg, "-rate=", 6)) {
+ } else if (CHECK_PARAM("-rate")) {
if (rate_found) {
error_msg = "multiple '-rate' found";
arg = NULL;
goto error;
}
rate_found = 1;
- *mux_rate = 1000*atoi(arg+6);
- } else if (!strnicmp(arg, "-mpeg4-carousel=", 16)) {
+ *mux_rate = 1000*atoi(next_arg);
+ } else if (CHECK_PARAM("-mpeg4-carousel")) {
if (mpeg4_carousel_found) {
error_msg = "multiple '-mpeg4-carousel' found";
arg = NULL;
goto error;
}
mpeg4_carousel_found = 1;
- *carrousel_rate = atoi(arg+16);
+ *carrousel_rate = atoi(next_arg);
} else if (!strnicmp(arg, "-real-time", 10)) {
if (real_time_found) {
goto error;
}
real_time_found = 1;
*real_time = 1;
- } else if (!strnicmp(arg, "-time=", 6)) {
+ } else if (CHECK_PARAM("-time")) {
if (time_found) {
error_msg = "multiple '-time' found";
arg = NULL;
goto error;
}
time_found = 1;
- *run_time = atoi(arg+6);
+ *run_time = atoi(next_arg);
} else if (!stricmp(arg, "-single-au")) {
*single_au_pes = 1;
} else if (!stricmp(arg, "-rap")) {
*split_rap = 1;
} else if (!stricmp(arg, "-flush-rap")) {
*split_rap = 2;
+ } else if (CHECK_PARAM("-nb-pack")) {
+ *nb_pck_pack = atoi(next_arg);
+ } else if (CHECK_PARAM("-nb-pck")) {
+ *nb_pck_pack = atoi(next_arg);
+ } else if (CHECK_PARAM("-ttl")) {
+ *ttl = atoi(next_arg);
+ } else if (CHECK_PARAM("-ifce")) {
+ *ip_ifce = next_arg;
+ } else if (CHECK_PARAM("-logs")) {
+ if (gf_log_set_tools_levels(next_arg) != GF_OK)
+ return GF_BAD_PARAM;
+ } else if (CHECK_PARAM("-lf")) {
+ logfile = gf_f64_open(next_arg, "wt");
+ /* only install the callback if the file opened, otherwise on_gpac_log would vfprintf to NULL */
+ if (logfile) gf_log_set_callback(logfile, on_gpac_log);
+ else fprintf(stderr, "Warning: cannot open log file %s - logs are not redirected\n", next_arg);
+ } else if (CHECK_PARAM("-segment-dir")) {
+ if (seg_dir_found) {
+ goto error;
+ }
+ seg_dir_found = 1;
+ *segment_dir = next_arg;
+ /* TODO: add the path separation char, if missing */
+ } else if (CHECK_PARAM("-segment-duration")) {
+ if (seg_dur_found) {
+ goto error;
+ }
+ seg_dur_found = 1;
+ *segment_duration = atoi(next_arg);
+ } else if (CHECK_PARAM("-segment-manifest")) {
+ if (seg_manifest_found) {
+ goto error;
+ }
+ seg_manifest_found = 1;
+ *segment_manifest = next_arg;
+ } else if (CHECK_PARAM("-segment-http-prefix")) {
+ if (seg_http_found) {
+ goto error;
+ }
+ seg_http_found = 1;
+ *segment_http_prefix = next_arg;
+ } else if (CHECK_PARAM("-segment-number")) {
+ if (seg_number_found) {
+ goto error;
+ }
+ seg_number_found = 1;
+ *segment_number = atoi(next_arg);
+ }
+ else if (CHECK_PARAM("-src")) {
+ if (src_found) {
+ error_msg = "multiple '-src' found";
+ arg = NULL;
+ goto error;
+ }
+ src_found = 1;
+ *src_name = next_arg;
+ } else if (CHECK_PARAM("-dst-file")) {
+ dst_found = 1;
+ *ts_out = gf_strdup(next_arg);
+ } else if (!strnicmp(arg, "-temi", 5) && strnicmp(arg, "-temi-delay", 11)) {
+ *temi_url = "";
+ if (arg[5]=='=' || arg[5]==' ') {
+ *temi_url = arg+6;
+ if (strlen(arg+6) > 150) {
+ fprintf(stderr, "URLs longer than 150 bytes are not currently supported\n");
+ return GF_NOT_SUPPORTED;
+ }
+ }
}
- else if (!strnicmp(arg, "-dst-udp=", 9)) {
- *real_time = 1;
- } else if (!strnicmp(arg, "-dst-rtp=", 9)) {
- *real_time = 1;
+ else if (CHECK_PARAM("-temi-delay")) {
+ temi_url_insertion_delay = atoi(next_arg);
+ }
+ else if (CHECK_PARAM("-dst-udp")) {
+ char *sep = strchr(next_arg, ':');
+ dst_found = 1;
+ *real_time=1;
+ if (sep) {
+ *output_port = atoi(sep+1);
+ sep[0]=0;
+ *udp_out = gf_strdup(next_arg);
+ sep[0]=':';
+ } else {
+ *udp_out = gf_strdup(next_arg);
+ }
+ }
+ else if (CHECK_PARAM("-dst-rtp")) {
+ char *sep = strchr(next_arg, ':');
+ dst_found = 1;
+ *real_time=1;
+ if (sep) {
+ *output_port = atoi(sep+1);
+ sep[0]=0;
+ *rtp_out = gf_strdup(next_arg);
+ sep[0]=':';
+ } else {
+ *rtp_out = gf_strdup(next_arg);
+ }
+ } else if (CHECK_PARAM("-prog")) {
+ /* value consumed here so the space-separated form does not trip the unknown-option check; programs are opened in the second pass */
+ } else { //unknown option
+ error_msg = "unknown option";
+ goto error;
}
}
if (*real_time) force_real_time = 1;
for (i=1; i<argc; i++) {
arg = argv[i];
if (arg[0]=='-') {
- if (!strnicmp(arg, "-logs=", 6)) {
- if (gf_log_set_tools_levels(argv[i+1]+6) != GF_OK)
- return GF_BAD_PARAM;
- } else if (!strnicmp(arg, "-prog=", 6)) {
+ if (CHECK_PARAM("-prog")) {
u32 res;
- prog_name = arg+6;
+ prog_name = next_arg;
res = open_program(&progs[*nb_progs], prog_name, *carrousel_rate, mpeg4_signaling, *src_name, *audio_input_ip, *audio_input_port, *video_buffer, force_real_time, *bifs_use_pes, *temi_url);
if (res) {
(*nb_progs)++;
if (res==2) *real_time=1;
}
- } else if (!strnicmp(arg, "-segment-dir=", 13)) {
- if (seg_dir_found) {
- goto error;
- }
- seg_dir_found = 1;
- *segment_dir = arg+13;
- /* TODO: add the path separation char, if missing */
- } else if (!strnicmp(arg, "-segment-duration=", 18)) {
- if (seg_dur_found) {
- goto error;
- }
- seg_dur_found = 1;
- *segment_duration = atoi(arg+18);
- } else if (!strnicmp(arg, "-segment-manifest=", 18)) {
- if (seg_manifest_found) {
- goto error;
- }
- seg_manifest_found = 1;
- *segment_manifest = arg+18;
- } else if (!strnicmp(arg, "-segment-http-prefix=", 21)) {
- if (seg_http_found) {
- goto error;
- }
- seg_http_found = 1;
- *segment_http_prefix = arg+21;
- } else if (!strnicmp(arg, "-segment-number=", 16)) {
- if (seg_number_found) {
- goto error;
- }
- seg_number_found = 1;
- *segment_number = atoi(arg+16);
- }
- else if (!strnicmp(arg, "-src=", 5)) {
- if (src_found) {
- error_msg = "multiple '-src' found";
- arg = NULL;
- goto error;
- }
- src_found = 1;
- *src_name = arg+5;
- }
- else if (!strnicmp(arg, "-nb-pack=", 9)) {
- *nb_pck_pack = atoi(arg+9);
- } else if (!strnicmp(arg, "-ttl=", 5)) {
- *ttl = atoi(arg+5);
- } else if (!strnicmp(arg, "-ifce=", 6)) {
- *ip_ifce = arg+6;
- } else if (!strnicmp(arg, "-dst-file=", 10)) {
- dst_found = 1;
- *ts_out = gf_strdup(arg+10);
- } else if (!strnicmp(arg, "-temi", 5)) {
- *temi_url = "";
- if (arg[5]=='=') {
- *temi_url = arg+6;
- if (strlen(arg+6) > 150) {
- fprintf(stderr, "URLs longer than 150 bytes are not currently supported\n");
- return GF_NOT_SUPPORTED;
- }
- }
- }
- else if (!strnicmp(arg, "-temi-delay=", 12)) {
- temi_url_insertion_delay = atoi(arg+12);
- }
- else if (!strnicmp(arg, "-dst-udp=", 9)) {
- char *sep = strchr(arg+9, ':');
- dst_found = 1;
- *real_time=1;
- if (sep) {
- *output_port = atoi(sep+1);
- sep[0]=0;
- *udp_out = gf_strdup(arg+9);
- sep[0]=':';
- } else {
- *udp_out = gf_strdup(arg+9);
- }
- }
- else if (!strnicmp(arg, "-dst-rtp=", 9)) {
- char *sep = strchr(arg+9, ':');
- dst_found = 1;
- *real_time=1;
- if (sep) {
- *output_port = atoi(sep+1);
- sep[0]=0;
- *rtp_out = gf_strdup(arg+9);
- sep[0]=':';
- } else {
- *rtp_out = gf_strdup(arg+9);
- }
- }
- else if (!strnicmp(arg, "-audio=", 7) || !strnicmp(arg, "-video=", 7) || !strnicmp(arg, "-mpeg4", 6))
- ; /*already treated on the first pass*/
- else {
-// error_msg = "unknown option \"%s\"";
-// goto error;
}
}
#if 0
return GF_OK;
} else {
if (!dst_found)
- fprintf(stderr, "Error: Destination argument not found\n\n");
+ fprintf(stderr, "Error: Destination argument not found\n");
if (! *nb_progs)
- fprintf(stderr, "Error: No Programs are available\n\n");
- if (!rate_found)
- fprintf(stderr, "Error: Rate argument not found\n\n");
+ fprintf(stderr, "Error: No Programs are available\n");
+ usage();
return GF_BAD_PARAM;
}
error:
if (!arg) {
- fprintf(stderr, "Error: %s\n\n", error_msg);
+ fprintf(stderr, "Error: %s\n", error_msg);
} else {
- fprintf(stderr, "Error: %s \"%s\"\n\n", error_msg, arg);
+ fprintf(stderr, "Error: %s \"%s\"\n", error_msg, arg);
}
return GF_BAD_PARAM;
}
GF_Err e;
u32 run_time;
Bool real_time, single_au_pes, is_stdout;
- u64 pcr_init_val=0;
+ s64 pcr_init_val = -1;
u32 usec_till_next, ttl, split_rap;
u32 i, j, mux_rate, nb_progs, cur_pid, carrousel_rate, last_print_time, last_video_time, bifs_use_pes, psi_refresh_rate, nb_pck_pack, nb_pck_in_pack;
char *ts_out = NULL, *udp_out = NULL, *rtp_out = NULL, *audio_input_ip = NULL;
/***************************/
muxer = gf_m2ts_mux_new(mux_rate, psi_refresh_rate, real_time);
if (muxer) gf_m2ts_mux_use_single_au_pes_mode(muxer, single_au_pes);
- if (pcr_init_val) gf_m2ts_mux_set_initial_pcr(muxer, pcr_init_val);
+ if (pcr_init_val>=0) gf_m2ts_mux_set_initial_pcr(muxer, (u64) pcr_init_val);
if (ts_out != NULL) {
if (segment_duration) {
if (aac_reader) AAC_Reader_del(aac_reader);
#endif
+ if (logfile) fclose(logfile);
gf_sys_close();
return 0;
}
#endif
#include <gpac/constants.h>
#include <gpac/avparse.h>
-#include <gpac/media_tools.h>
+#include <gpac/internal/media_dev.h>
/*for asctime and gmtime*/
#include <time.h>
/*ISO 639 languages*/
GF_AVCConfig *avccfg, *svccfg;
GF_HEVCConfig *hevccfg, *shvccfg;
GF_AVCConfigSlot *slc;
+ Bool is_adobe_protection = GF_FALSE;
memset(&avc, 0, sizeof(AVCState));
#endif
fprintf(dump, " <NALUSamples>\n");
gf_isom_set_nalu_extract_mode(file, track, GF_ISOM_NALU_EXTRACT_INSPECT);
+ is_adobe_protection = gf_isom_is_adobe_protection_media(file, track, 1);
for (i=0; i<count; i++) {
u64 dts, cts;
u32 size, nal_size, idx;
idx = 1;
ptr = samp->data;
size = samp->dataLength;
+ if (is_adobe_protection) {
+ u8 encrypted_au = ptr[0];
+ if (encrypted_au) {
+ fprintf(dump, " <!-- Sample number %d is an Adobe protected sample: cannot be dumped -->\n", i+1);
+ fprintf(dump, " </Sample>\n\n");
+ continue;
+ }
+ else {
+ ptr++;
+ size--;
+ }
+ }
while (size) {
u32 v = nalh_size;
nal_size = 0;
}
#ifndef GPAC_DISABLE_HEVC
-void dump_hevc_track_info(GF_ISOFile *file, u32 trackNum, GF_HEVCConfig *hevccfg)
+void dump_hevc_track_info(GF_ISOFile *file, u32 trackNum, GF_HEVCConfig *hevccfg, HEVCState *hevc_state)
{
- u32 k;
+ u32 k, idx;
fprintf(stderr, "\t%s Info: Profile %s @ Level %g - Chroma Format %d\n", hevccfg->is_shvc ? "SHVC" : "HEVC", gf_hevc_get_profile_name(hevccfg->profile_idc), ((Double)hevccfg->level_idc) / 30.0, hevccfg->chromaFormat);
fprintf(stderr, "\tNAL Unit length bits: %d - general profile compatibility 0x%08X\n", 8*hevccfg->nal_unit_size, hevccfg->general_profile_compatibility_flags);
fprintf(stderr, "\tParameter Sets: ");
if (ar->type==GF_HEVC_NALU_SEQ_PARAM) {
fprintf(stderr, "%d SPS ", gf_list_count(ar->nalus));
}
- else if (ar->type==GF_HEVC_NALU_PIC_PARAM) {
+ if (ar->type==GF_HEVC_NALU_PIC_PARAM) {
fprintf(stderr, "%d PPS ", gf_list_count(ar->nalus));
}
if (ar->type==GF_HEVC_NALU_VID_PARAM) {
fprintf(stderr, "%d VPS ", gf_list_count(ar->nalus));
+
+ for (idx=0; idx<gf_list_count(ar->nalus); idx++) {
+ GF_AVCConfigSlot *vps = gf_list_get(ar->nalus, idx);
+ gf_media_hevc_read_vps(vps->data, vps->size, hevc_state);
+ }
}
}
+
fprintf(stderr, "\n");
for (k=0; k<gf_list_count(hevccfg->param_array); k++) {
GF_HEVCParamArray *ar=gf_list_get(hevccfg->param_array, k);
- u32 idx, width, height;
+ u32 width, height;
s32 par_n, par_d;
+
if (ar->type !=GF_HEVC_NALU_SEQ_PARAM) continue;
for (idx=0; idx<gf_list_count(ar->nalus); idx++) {
GF_AVCConfigSlot *sps = gf_list_get(ar->nalus, idx);
par_n = par_d = -1;
- gf_hevc_get_sps_info(sps->data, sps->size, NULL, &width, &height, &par_n, &par_d);
+ gf_hevc_get_sps_info_with_state(hevc_state, sps->data, sps->size, NULL, &width, &height, &par_n, &par_d);
fprintf(stderr, "\tSPS resolution %dx%d", width, height);
if ((par_n>0) && (par_d>0)) {
u32 tw, th;
}
gf_isom_get_audio_info(file, trackNum, 1, &sr, &nb_ch, &bps);
+ gf_isom_set_nalu_extract_mode(file, trackNum, GF_ISOM_NALU_EXTRACT_INSPECT);
msub_type = gf_isom_get_media_subtype(file, trackNum, 1);
if ((msub_type==GF_ISOM_SUBTYPE_MPEG4)
|| (msub_type==GF_ISOM_SUBTYPE_LSR1)
|| (msub_type==GF_ISOM_SUBTYPE_HVC1)
|| (msub_type==GF_ISOM_SUBTYPE_HEV1)
+ || (msub_type==GF_ISOM_SUBTYPE_SHV1)
+ || (msub_type==GF_ISOM_SUBTYPE_SHC1)
) {
esd = gf_isom_get_esd(file, trackNum, 1);
if (!esd) {
}
#endif /*GPAC_DISABLE_AV_PARSERS*/
- } else if (esd->decoderConfig->objectTypeIndication==GPAC_OTI_VIDEO_HEVC) {
+ } else if ((esd->decoderConfig->objectTypeIndication==GPAC_OTI_VIDEO_HEVC)
+ || (esd->decoderConfig->objectTypeIndication==GPAC_OTI_VIDEO_SHVC)
+ ) {
#if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC)
+ HEVCState hevc_state;
GF_HEVCConfig *hevccfg, *shvccfg;
+ memset(&hevc_state, 0, sizeof(HEVCState));
+ hevc_state.sps_active_idx = -1;
#endif
gf_isom_get_visual_info(file, trackNum, 1, &w, &h);
fprintf(stderr, "\n\n\tNon-compliant HEVC track: No hvcC or shcC found in sample description\n");
}
if (hevccfg) {
- dump_hevc_track_info(file, trackNum, hevccfg);
+ dump_hevc_track_info(file, trackNum, hevccfg, &hevc_state);
gf_odf_hevc_cfg_del(hevccfg);
fprintf(stderr, "\n");
}
if (shvccfg) {
- dump_hevc_track_info(file, trackNum, shvccfg);
+ dump_hevc_track_info(file, trackNum, shvccfg, &hevc_state);
gf_odf_hevc_cfg_del(shvccfg);
}
#endif /*GPAC_DISABLE_AV_PARSERS && defined(GPAC_DISABLE_HEVC)*/
gf_isom_get_cenc_info(file, trackNum, 1, NULL, &scheme_type, &version, &IV_size);
fprintf(stderr, "\n*Encrypted stream - CENC scheme %s (version %d)\n", gf_4cc_to_str(scheme_type), version);
if (IV_size) fprintf(stderr, "Initialization Vector size: %d bits\n", IV_size*8);
+ } else if(gf_isom_is_adobe_protection_media(file, trackNum, 1)) {
+ gf_isom_get_adobe_protection_info(file, trackNum, 1, NULL, &scheme_type, &version);
+ fprintf(stderr, "\n*Encrypted stream - Adobe protection scheme %s (version %d)\n", gf_4cc_to_str(scheme_type), version);
} else {
fprintf(stderr, "\n*Encrypted stream - unknown scheme %s\n", gf_4cc_to_str(gf_isom_is_media_encrypted(file, trackNum, 1) ));
}
}
fprintf(stderr, "File has %d tracks\n", import.nb_tracks);
}
+ if (import.probe_duration) {
+ fprintf(stderr, "Duration: %g ms\n", (Double) (import.probe_duration/1000.0));
+ }
found = 0;
for (i=0; i<import.nb_tracks; i++) {
if (trackID && (trackID != import.tk_info[i].track_num)) continue;
case GF_ISOM_MEDIA_TEXT: fprintf(stderr, "Text (%s)", gf_4cc_to_str(import.tk_info[i].media_type)); break;
case GF_ISOM_MEDIA_SCENE: fprintf(stderr, "Scene (%s)", gf_4cc_to_str(import.tk_info[i].media_type)); break;
case GF_ISOM_MEDIA_OD: fprintf(stderr, "OD (%s)", gf_4cc_to_str(import.tk_info[i].media_type)); break;
+ case GF_ISOM_MEDIA_META: fprintf(stderr, "Metadata (%s)", gf_4cc_to_str(import.tk_info[i].media_type)); break;
default: fprintf(stderr, "Other (4CC: %s)", gf_4cc_to_str(import.tk_info[i].type)); break;
}
u32 track_id, i, timescale, track, stype, profile, level, new_timescale, rescale, svc_mode, tile_mode;
s32 par_d, par_n, prog_id, delay;
s32 tw, th, tx, ty, txtw, txth, txtx, txty;
- Bool do_audio, do_video, do_all, disable, track_layout, text_layout, chap_ref, is_chap, is_chap_file, keep_handler, negative_cts_offset;
+ Bool do_audio, do_video, do_all, disable, track_layout, text_layout, chap_ref, is_chap, is_chap_file, keep_handler, negative_cts_offset, rap_only;
u32 group, handler, rvc_predefined, check_track_for_svc, check_track_for_shvc;
const char *szLan;
GF_Err e;
profile = level = 0;
negative_cts_offset = 0;
tile_mode = 0;
+ rap_only = 0;
tw = th = tx = ty = txtw = txth = txtx = txty = 0;
par_d = par_n = -2;
else if (!stricmp(ext+1, "nosvc") || !stricmp(ext+1, "noshvc")) import_flags |= GF_IMPORT_SVC_NONE;
else if (!stricmp(ext+1, "subsamples")) import_flags |= GF_IMPORT_SET_SUBSAMPLES;
else if (!stricmp(ext+1, "forcesync")) import_flags |= GF_IMPORT_FORCE_SYNC;
+ else if (!stricmp(ext+1, "rap")) rap_only = 1;
else if (!stricmp(ext+1, "mpeg4")) import_flags |= GF_IMPORT_FORCE_MPEG4;
else if (!stricmp(ext+1, "swf-global")) import.swf_flags |= GF_SM_SWF_STATIC_DICT;
else if (!stricmp(ext+1, "swf-no-ctrl")) import.swf_flags &= ~GF_SM_SWF_SPLIT_TIMELINE;
if ((par_n>=0) && (par_d>=0)) {
e = gf_media_change_par(import.dest, i+1, par_n, par_d);
}
+
+ if (rap_only) {
+ e = gf_media_remove_non_rap(import.dest, i+1);
+ }
+
if (handler_name) gf_isom_set_handler_name(import.dest, i+1, handler_name);
else if (!keep_handler) {
char szHName[1024];
if ((import.tk_info[i].type==GF_ISOM_MEDIA_VISUAL) && (par_n>=-1) && (par_d>=-1)) {
e = gf_media_change_par(import.dest, track, par_n, par_d);
}
+ if (rap_only) {
+ e = gf_media_remove_non_rap(import.dest, track);
+ }
if (handler_name) gf_isom_set_handler_name(import.dest, track, handler_name);
else if (!keep_handler) {
char szHName[1024];
GF_ISOFile *orig;
GF_Err e;
char *opts, *multi_cat;
- Float ts_scale;
+ Double ts_scale;
Double dest_orig_dur;
u32 dst_tk, tk_id, mtype;
u64 insert_dts;
if (!count) insert_dts = 0;
}
- ts_scale = (Float) gf_isom_get_media_timescale(dest, dst_tk);
+ ts_scale = gf_isom_get_media_timescale(dest, dst_tk);
ts_scale /= gf_isom_get_media_timescale(orig, i+1);
/*if not a new track, see if we can merge the edit list - this is a crude test that only checks
int live_session(int argc, char **argv)
{
GF_Err e;
- int i;
+ u32 i;
char *filename = NULL;
char *dst = NULL;
char *ifce_addr = NULL;
gf_sleep(10);
continue;
}
- ch = next_carousel(&livesess, &next_time);
+ ch = next_carousel(&livesess, (u32 *) &next_time);
if ((ch==NULL) || (next_time > 20)) {
gf_sleep(20);
continue;
/*in filedump.c*/
#ifndef GPAC_DISABLE_SCENE_DUMP
GF_Err dump_file_text(char *file, char *inName, u32 dump_mode, Bool do_log);
+//void gf_check_isom_files(char *conf_rules, char *inName);
#endif
#ifndef GPAC_DISABLE_SCENE_STATS
void dump_scene_stats(char *file, char *inName, u32 stat_level);
" -cprt string adds copyright string to movie\n"
" -chap file adds chapter information contained in file\n"
" -rem trackID removes track from file\n"
+ " -rap trackID removes all non-RAP samples from track\n"
" -enable trackID enables track\n"
" -disable trackID disables track\n"
" -new forces creation of a new destination file\n"
" -dash-scale SCALE specifies that timing for -dash and -frag are expressed in SCALE units per seconds\n"
" -mem-frags fragments will be produced in memory rather than on disk before flushing to disk\n"
" -pssh-moof stores PSSH boxes in first moof of each segments. By default PSSH are stored in movie box.\n"
+ " -sample-groups-traf stores sample group descriptions in traf (duplicated for each traf) rather than in moof. By default sample group descriptions are stored in movie box.\n"
"\n"
"Advanced Options, should not be needed when using -dash-profile:\n"
7: disables track
8: referenceTrack
9: raw extraction
+ 10: remove non-rap
*/
u32 act_type;
/*track ID*/
Bool enable_mem_tracker = 0;
Bool dump_iod=0;
Bool pssh_in_moof=0;
+ Bool samplegroups_in_traf=0;
Bool daisy_chain_sidx=0;
Bool single_segment=0;
Bool single_file=0;
else if (!stricmp(arg, "-dump-chap-ogg")) dump_chap = 2;
else if (!stricmp(arg, "-hash")) do_hash = 1;
+#if 0
+ else if (!stricmp(arg, "-conf")) {
+ if (i+1==(u32)argc) { fprintf(stderr, "Missing arg - please check usage\n"); MP4BOX_EXIT_WITH_CODE(1); }
+ if (i+2==(u32)argc) {
+ gf_check_isom_files(NULL, argv[i+1]);
+ } else {
+ gf_check_isom_files(argv[i+1], argv[i+2]);
+ }
+ MP4BOX_EXIT_WITH_CODE(0);
+ }
+#endif
else if (!stricmp(arg, "-dmp4")) {
dump_isom = 1;
fprintf(stderr, "WARNING: \"-dmp4\" is deprecated - use \"-diso\" option\n");
} else if (!stricmp(arg, "-dash")) {
CHECK_NEXT_ARG
dash_duration = atof(argv[i+1]) / 1000;
+ if (dash_duration == 0.0) {
+ fprintf(stderr, "\tERROR: \"-dash\": invalid parameter %s\n", argv[i+1]);
+ MP4BOX_EXIT_WITH_CODE(1);
+ }
i++;
} else if (!stricmp(arg, "-subdur")) {
CHECK_NEXT_ARG
single_file = 1;
} else if (!stricmp(arg, "-pssh-moof")) {
pssh_in_moof = 1;
+ } else if (!stricmp(arg, "-sample-groups-traf")) {
+ samplegroups_in_traf = 1;
} else if (!stricmp(arg, "-dash-profile") || !stricmp(arg, "-profile")) {
CHECK_NEXT_ARG
if (!stricmp(argv[i+1], "live") || !stricmp(argv[i+1], "simple")) dash_profile = GF_DASH_PROFILE_LIVE;
} else if (!stricmp(arg, "-ocr")) force_ocr = 1;
else if (!stricmp(arg, "-latm")) hint_flags |= GP_RTP_PCK_USE_LATM_AAC;
else if (!stricmp(arg, "-rap")) {
+ if ((i+1 < (u32)argc) && (argv[i+1][0] != '-')) {
+ if (sscanf(argv[i+1], "%d", &trackID) == 1) {
+ tracks = gf_realloc(tracks, sizeof(TrackAction) * (nb_track_act+1));
+ memset(&tracks[nb_track_act], 0, sizeof(TrackAction) );
+ tracks[nb_track_act].act_type = 10;
+ tracks[nb_track_act].trackID = trackID;
+ nb_track_act++;
+ i++;
+ open_edit = 1;
+ }
+ }
hint_flags |= GP_RTP_PCK_SIGNAL_RAP;
seg_at_rap=1;
}
seg_at_rap, dash_duration, seg_name, seg_ext, segment_marker,
interleaving_time, subsegs_per_sidx, daisy_chain_sidx, frag_at_rap, tmpdir,
dash_ctx, dash_dynamic, mpd_update_time, time_shift_depth, dash_subduration, min_buffer,
- ast_shift_sec, dash_scale, memory_frags, initial_moof_sn, initial_tfdt, no_fragments_defaults, pssh_in_moof);
+ ast_shift_sec, dash_scale, memory_frags, initial_moof_sn, initial_tfdt, no_fragments_defaults, pssh_in_moof, samplegroups_in_traf);
if (e) break;
if (dash_live) {
e = gf_isom_set_track_reference(file, track, GF_4CC(tka->lang[0], tka->lang[1], tka->lang[2], tka->lang[3]), (u32) tka->delay_ms);
needSave = 1;
break;
+ case 10:
+ fprintf(stderr, "Removing non-rap samples from track %d\n", tka->trackID);
+ e = gf_media_remove_non_rap(file, track);
+ needSave = 1;
+ break;
}
if (e) goto err_exit;
}
GF_Terminal *term;
u64 Duration;
GF_Err last_error = GF_OK;
+static Bool enable_add_ons = GF_TRUE;
static Bool request_next_playlist_item = GF_FALSE;
FILE *playlist = NULL;
"\n"
"\t-exit: automatically exits when presentation is over\n"
"\t-run-for TIME: runs for TIME seconds and exits\n"
+ "\t-no-addon: disable automatic loading of media addons declared in source URL\n"
"\t-gui: starts in GUI mode. The GUI is indicated in GPAC config, section General, by the key [StartupFile]\n"
"\n"
"Dumper Options:\n"
if (display_rti) {
char szMsg[1024];
- if (rti.total_cpu_usage && (bench_mode!=2) ) {
+ if (rti.total_cpu_usage && (bench_mode<2) ) {
sprintf(szMsg, "FPS %d CPU %2d (%02d) Mem %d kB",
(u32) gf_term_get_framerate(term, 0), rti.total_cpu_usage, rti.process_cpu_usage, (u32) (rti.gpac_memory / 1024));
} else {
}
if (display_rti==2) {
- if (bench_mode==2) {
+ if (bench_mode>=2) {
PrintAVInfo(GF_FALSE);
}
fprintf(stderr, "%s\r", szMsg);
}
return 1;
}
+ case GF_EVENT_ADDON_DETECTED:
+ if (enable_add_ons)
+ fprintf(stderr, "Media Addon %s detected - enabling it\n", evt->addon_connect.addon_url);
+ return enable_add_ons;
}
return 0;
}
Bool rgbd_dump = GF_FALSE;
Bool depth_dump = GF_FALSE;
Bool pause_at_first = GF_FALSE;
+ Double play_from = 0;
#ifdef GPAC_MEMORY_TRACKING
Bool enable_mem_tracker = GF_FALSE;
#endif
else if (!strcmp(arg, "-no-regulation")) no_regulation = 1;
else if (!strcmp(arg, "-fs")) start_fs = 1;
else if (!strcmp(arg, "-pause")) pause_at_first = 1;
+ else if (!strcmp(arg, "-play-from")) {
+ play_from = atof((const char *) argv[i+1]);
+ }
else if (!strcmp(arg, "-exit")) auto_exit = 1;
else if (!strcmp(arg, "-mem-track")) {
#ifdef GPAC_MEMORY_TRACKING
else if (!strcmp(arg, "-bench")) bench_mode = 1;
else if (!strcmp(arg, "-vbench")) bench_mode = 2;
else if (!strcmp(arg, "-sbench")) bench_mode = 3;
+ else if (!strcmp(arg, "-no-addon")) enable_add_ons = GF_FALSE;
+
else if (!strcmp(arg, "-opt")) {
set_cfg_option(argv[i+1]);
i++;
if (gui_mode) {
if (gui_mode==1) {
hide_shell(1);
- user.init_flags |= GF_TERM_WINDOW_NO_DECORATION;
+ //user.init_flags |= GF_TERM_WINDOW_NO_DECORATION;
}
}
if (bench_mode!=2) {
gf_cfg_set_key(user.config, "Video", "DriverName", "Raw Video Output");
gf_cfg_set_key(user.config, "RAWVideo", "RawOutput", "null");
- gf_cfg_set_key(user.config, "Compositor", "ForceOpenGL", "no");
+ gf_cfg_set_key(user.config, "Compositor", "OpenGLMode", "disable");
} else {
gf_cfg_set_key(user.config, "Video", "DisableVSync", "yes");
}
if (bench_mode) {
display_rti = 2;
gf_term_set_option(term, GF_OPT_VIDEO_BENCH, (bench_mode==3) ? 2 : 1);
- bench_mode=2;
+ if (bench_mode==1) bench_mode=2;
}
if (dump_mode) {
} else {
fprintf(stderr, "Opening URL %s\n", the_url);
if (pause_at_first) fprintf(stderr, "[Status: Paused]\n");
- gf_term_connect_from_time(term, the_url, 0, pause_at_first);
+ gf_term_connect_from_time(term, the_url, (u64) (play_from*1000), pause_at_first);
}
} else {
fprintf(stderr, "Hit 'h' for help\n\n");
void PrintAVInfo(Bool final)
{
GF_MediaInfo a_odi, v_odi, s_odi;
- Float avg_dec_time=0;
+ Double avg_dec_time=0;
u32 tot_time=0;
Bool print_codecs = final;
GF_ObjectManager *odm = gf_term_get_object(term, root_odm, i);
if (!odm) break;
if (gf_term_get_object_info(term, odm, &v_odi) == GF_OK) {
- if (!video_odm && (v_odi.od_type == GF_STREAM_VISUAL) && (v_odi.raw_media || (v_odi.cb_max_count>1)) ) {
+ if (!video_odm && (v_odi.od_type == GF_STREAM_VISUAL) && (v_odi.raw_media || (v_odi.cb_max_count>1) || v_odi.direct_video_memory) ) {
video_odm = odm;
}
else if (!audio_odm && (v_odi.od_type == GF_STREAM_AUDIO)) {
}
if (video_odm) {
- gf_term_get_object_info(term, video_odm, &v_odi);
+ if (gf_term_get_object_info(term, video_odm, &v_odi)!= GF_OK) {
+ video_odm = NULL;
+ return;
+ }
avg_dec_time = 0;
if (v_odi.nb_dec_frames && v_odi.total_dec_time) {
- avg_dec_time = (Float) 1000 * v_odi.nb_dec_frames;
+ avg_dec_time = (Float) 1000000 * v_odi.nb_dec_frames;
avg_dec_time /= v_odi.total_dec_time;
}
}
tot_time = gf_sys_clock() - bench_mode_start;
fprintf(stderr, " \r");
fprintf(stderr, "************** Bench Mode Done in %d ms ********************\n", tot_time);
+ if (bench_mode==3) fprintf(stderr, "** Systems layer only (no decoding) **\n");
if (!video_odm) {
u32 nb_frames_drawn;
u32 dec_run_time = v_odi.last_frame_time - v_odi.first_frame_time;
if (!dec_run_time) dec_run_time = 1;
if (v_odi.duration) fprintf(stderr, "%d%% ", (u32) (100*v_odi.current_time / v_odi.duration ) );
- fprintf(stderr, "%d frames FPS %.2f (max %d ms/f) rate avg %d max %d", v_odi.nb_dec_frames, ((Float)v_odi.nb_dec_frames*1000) / dec_run_time, v_odi.max_dec_time, (u32) v_odi.avg_bitrate/1000, (u32) v_odi.max_bitrate/1000);
+ fprintf(stderr, "%d frames FPS %.2f (max "LLU" us/f) rate avg %d max %d", v_odi.nb_dec_frames, ((Float)v_odi.nb_dec_frames*1000) / dec_run_time, v_odi.max_dec_time, (u32) v_odi.avg_bitrate/1000, (u32) v_odi.max_bitrate/1000);
if (v_odi.nb_droped) {
fprintf(stderr, " (Error during bench: %d frames drop)", v_odi.nb_droped);
}
u32 dec_run_time = a_odi.last_frame_time - a_odi.first_frame_time;
if (!dec_run_time) dec_run_time = 1;
if (a_odi.duration) fprintf(stderr, "%d%% ", (u32) (100*a_odi.current_time / a_odi.duration ) );
- fprintf(stderr, "%d frames (ms/f %.2f avg %d max) rate avg %d max %d", a_odi.nb_dec_frames, ((Float)dec_run_time)/a_odi.nb_dec_frames, a_odi.max_dec_time, (u32) a_odi.avg_bitrate/1000, (u32) a_odi.max_bitrate/1000);
+ fprintf(stderr, "%d frames (ms/f %.2f avg %.2f max) rate avg %d max %d", a_odi.nb_dec_frames, ((Float)dec_run_time)/a_odi.nb_dec_frames, a_odi.max_dec_time/1000.0, (u32) a_odi.avg_bitrate/1000, (u32) a_odi.max_bitrate/1000);
if (a_odi.nb_droped) {
fprintf(stderr, " (Error during bench: %d frames drop)", a_odi.nb_droped);
}
if (s_odi.nb_dec_frames>2 && s_odi.total_dec_time) {
u32 dec_run_time = s_odi.last_frame_time - s_odi.first_frame_time;
if (!dec_run_time) dec_run_time = 1;
- fprintf(stderr, "%d frames FPS %.2f (max %d ms/f) rate avg %d max %d", s_odi.nb_dec_frames, ((Float)s_odi.nb_dec_frames*1000) / dec_run_time, s_odi.max_dec_time, (u32) s_odi.avg_bitrate/1000, (u32) s_odi.max_bitrate/1000);
+ fprintf(stderr, "%d frames FPS %.2f (max "LLD" us/f) rate avg %d max %d", s_odi.nb_dec_frames, ((Float)s_odi.nb_dec_frames*1000) / dec_run_time, s_odi.max_dec_time, (u32) s_odi.avg_bitrate/1000, (u32) s_odi.max_bitrate/1000);
fprintf(stderr, "\n");
} else {
u32 nb_frames_drawn;
tot_time = v_odi.last_frame_time - v_odi.first_frame_time;
if (!tot_time) tot_time=1;
if (v_odi.duration) fprintf(stderr, "%d%% ", (u32) (100*v_odi.current_time / v_odi.duration ) );
- fprintf(stderr, "%d frames FPS %.2f (%dms max) - rate %d ", v_odi.nb_dec_frames, ((Float)v_odi.nb_dec_frames*1000) / tot_time, v_odi.max_dec_time, (u32) v_odi.instant_bitrate/1000);
+ fprintf(stderr, "%d f FPS %.2f (%.2f ms max) rate %d ", v_odi.nb_dec_frames, ((Float)v_odi.nb_dec_frames*1000) / tot_time, v_odi.max_dec_time/1000.0, (u32) v_odi.instant_bitrate/1000);
}
else if (scene_odm) {
avg_dec_time = 0;
if (s_odi.nb_dec_frames>2 && s_odi.total_dec_time) {
- avg_dec_time = (Float) 1000 * s_odi.nb_dec_frames;
+ avg_dec_time = (Float) 1000000 * s_odi.nb_dec_frames;
avg_dec_time /= s_odi.total_dec_time;
if (s_odi.duration) fprintf(stderr, "%d%% ", (u32) (100*s_odi.current_time / s_odi.duration ) );
- fprintf(stderr, "%d frames %.2f (%dms max) - rate %d ", s_odi.nb_dec_frames, avg_dec_time, s_odi.max_dec_time, (u32) s_odi.instant_bitrate/1000);
+ fprintf(stderr, "%d f %.2f ("LLU" us max) - rate %d ", s_odi.nb_dec_frames, avg_dec_time, s_odi.max_dec_time, (u32) s_odi.instant_bitrate/1000);
} else {
u32 nb_frames_drawn;
Double FPS = gf_term_get_simulation_frame_rate(term, &nb_frames_drawn);
tot_time = gf_sys_clock() - bench_mode_start;
FPS = gf_term_get_framerate(term, 1);
- fprintf(stderr, "%d frames FPS %.2f (abs %.2f) ", nb_frames_drawn, (1000.0*nb_frames_drawn / tot_time), FPS);
+ fprintf(stderr, "%d f FPS %.2f (abs %.2f) ", nb_frames_drawn, (1000.0*nb_frames_drawn / tot_time), FPS);
}
}
}
avg_dec_time = (Float) odi.total_dec_time;
avg_dec_time /= odi.nb_dec_frames;
}
- fprintf(stderr, "\tBitrate over last second: %d kbps\n\tMax bitrate over one second: %d kbps\n\tAverage Decoding Time %.2f ms (%d max)\n\tTotal decoded frames %d\n",
+ fprintf(stderr, "\tBitrate over last second: %d kbps\n\tMax bitrate over one second: %d kbps\n\tAverage Decoding Time %.2f ms ("LLU" max)\n\tTotal decoded frames %d\n",
(u32) odi.avg_bitrate/1024, odi.max_bitrate/1024, avg_dec_time, odi.max_dec_time, odi.nb_dec_frames);
}
if (odi.protection) fprintf(stderr, "Encrypted Media%s\n", (odi.protection==2) ? " NOT UNLOCKED" : "");
fprintf(stderr, "\n * %d decoded frames - %d dropped frames\n", odi.nb_dec_frames, odi.nb_droped);
avg_dec_time = 0;
if (odi.nb_dec_frames) { avg_dec_time = (Float) odi.total_dec_time; avg_dec_time /= odi.nb_dec_frames; }
- fprintf(stderr, " * Avg Bitrate %d kbps (%d max) - Avg Decoding Time %.2f ms (%d max)\n",
+ fprintf(stderr, " * Avg Bitrate %d kbps (%d max) - Avg Decoding Time %.2f ms ("LLU" max)\n",
(u32) odi.avg_bitrate/1024, odi.max_bitrate/1024, avg_dec_time, odi.max_dec_time);
}
--- /dev/null
+/*\r
+ * GPAC - Multimedia Framework C SDK\r
+ *\r
+ * Authors: Jean Le Feuvre\r
+ * Copyright (c) Telecom ParisTech 2012\r
+ * All rights reserved\r
+ *\r
+ * This file is part of GPAC - sample OpenHEVC/SDL/OpenGL texture-upload benchmark\r
+ *\r
+ */\r
+\r
+#ifndef __DEF_BENCH_H__\r
+#define __DEF_BENCH_H__\r
+\r
+#include <gpac/isomedia.h>\r
+#include <openHevcWrapper.h>\r
+#include <windows.h>\r
+#define SDL_MAIN_HANDLED\r
+#include <SDL.h>\r
+#include <gpac/math.h>\r
+\r
+#define GL_GLEXT_PROTOTYPES\r
+\r
+#include <GL/GL.h>\r
+#include <gpac/isomedia.h>\r
+\r
+\r
+\r
+\r
+#define GL_CHECK_ERR {s32 res = glGetError(); if (res) GF_LOG(GF_LOG_ERROR, GF_LOG_MMIO, ("GL Error %d file %s line %d\n", res, __FILE__, __LINE__)); }\r
+\r
+/*macros for GL proto and fun declaration*/\r
+#ifdef _WIN32_WCE\r
+#define GLAPICAST *\r
+#elif defined(WIN32)\r
+#include <windows.h>\r
+#define GLAPICAST APIENTRY *\r
+#else\r
+#define GLAPICAST *\r
+#endif\r
+\r
+#define GLDECL(ret, funname, args) \\r
+typedef ret (GLAPICAST proc_ ## funname)args; \\r
+extern proc_ ## funname funname; \\r
+\r
+#define GLDECL_STATIC(funname) proc_ ## funname funname = NULL\r
+\r
+#if defined GPAC_USE_TINYGL\r
+//no extensions with TinyGL\r
+#elif defined (GPAC_USE_OGL_ES)\r
+//no extensions with OpenGL ES\r
+#elif defined(WIN32) || defined (GPAC_CONFIG_WIN32)\r
+#define LOAD_GL_FUNCS\r
+#define GET_GLFUN(funname) funname = (proc_ ## funname) wglGetProcAddress(#funname) \r
+#elif defined(CONFIG_DARWIN_GL)\r
+extern void (*glutGetProcAddress(const GLubyte *procname))( void );\r
+#define GET_GLFUN(funname) funname = (proc_ ## funname) glutGetProcAddress(#funname) \r
+#else\r
+#define LOAD_GL_FUNCS\r
+extern void (*glXGetProcAddress(const GLubyte *procname))( void );\r
+#define GET_GLFUN(funname) funname = (proc_ ## funname) glXGetProcAddress(#funname) \r
+#endif\r
+\r
+\r
+\r
+#define DEL_SHADER(_a) if (_a) { glDeleteShader(_a); _a = 0; }\r
+#define DEL_PROGRAM(_a) if (_a) { glDeleteProgram(_a); _a = 0; }\r
+\r
+\r
+GLDECL(GLuint, glCreateProgram, (void) )\r
+GLDECL(void, glDeleteProgram, (GLuint ) )\r
+GLDECL(void, glLinkProgram, (GLuint program) )\r
+GLDECL(void, glUseProgram, (GLuint program) )\r
+GLDECL(GLuint, glCreateShader, (GLenum shaderType) )\r
+GLDECL(void, glDeleteShader, (GLuint shader) )\r
+GLDECL(void, glShaderSource, (GLuint shader, GLsizei count, const char **string, const GLint *length) )\r
+GLDECL(void, glCompileShader, (GLuint shader) )\r
+GLDECL(void, glAttachShader, (GLuint program, GLuint shader) )\r
+GLDECL(void, glDetachShader, (GLuint program, GLuint shader) )\r
+GLDECL(void, glGetShaderiv, (GLuint shader, GLenum type, GLint *res) )\r
+GLDECL(void, glGetInfoLogARB, (GLuint shader, GLint size, GLsizei *rsize, const char *logs) )\r
+GLDECL(GLint, glGetUniformLocation, (GLuint prog, const char *name) )\r
+GLDECL(void, glUniform1f, (GLint location, GLfloat v0) )\r
+GLDECL(void, glUniform1i, (GLint location, GLint v0) )\r
+GLDECL(void, glActiveTexture, (GLenum texture) )\r
+GLDECL(void, glClientActiveTexture, (GLenum texture) )\r
+GLDECL(void, glGenBuffers, (GLsizei , GLuint *) )\r
+GLDECL(void, glDeleteBuffers, (GLsizei , GLuint *) )\r
+GLDECL(void, glBindBuffer, (GLenum, GLuint ) )\r
+GLDECL(void, glBufferData, (GLenum, int, void *, GLenum) )\r
+GLDECL(void, glBufferSubData, (GLenum, int, int, void *) )\r
+GLDECL(void *, glMapBuffer, (GLenum, GLenum) )\r
+GLDECL(GLboolean, glUnmapBuffer, (GLenum) )\r
+\r
+\r
+#define GL_TEXTURE_RECTANGLE_EXT 0x84F5\r
+\r
+#define GL_INFO_LOG_LENGTH 0x8B84\r
+#define GL_FRAGMENT_SHADER 0x8B30\r
+#define GL_VERTEX_SHADER 0x8B31\r
+#define GL_PIXEL_UNPACK_BUFFER_ARB 0x88EC\r
+#define GL_STREAM_DRAW_ARB 0x88E0\r
+#define GL_WRITE_ONLY_ARB 0x88B9\r
+#define GL_DYNAMIC_DRAW_ARB 0x88E8\r
+\r
+#define GL_TEXTURE0 0x84C0\r
+#define GL_TEXTURE1 0x84C1\r
+#define GL_TEXTURE2 0x84C2\r
+\r
+\r
+#endif\r
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>\r
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\r
+ <ItemGroup Label="ProjectConfigurations">\r
+ <ProjectConfiguration Include="Debug|Win32">\r
+ <Configuration>Debug</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Debug|x64">\r
+ <Configuration>Debug</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release|Win32">\r
+ <Configuration>Release</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release|x64">\r
+ <Configuration>Release</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ </ItemGroup>\r
+ <PropertyGroup Label="Globals">\r
+ <ProjectGuid>{F728CC84-A7D1-43D2-8A28-05CE9F2FE0D0}</ProjectGuid>\r
+ </PropertyGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">\r
+ <ConfigurationType>Application</ConfigurationType>\r
+ <UseOfMfc>false</UseOfMfc>\r
+ <CharacterSet>MultiByte</CharacterSet>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">\r
+ <ConfigurationType>Application</ConfigurationType>\r
+ <UseOfMfc>false</UseOfMfc>\r
+ <CharacterSet>MultiByte</CharacterSet>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">\r
+ <ConfigurationType>Application</ConfigurationType>\r
+ <UseOfMfc>false</UseOfMfc>\r
+ <CharacterSet>MultiByte</CharacterSet>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">\r
+ <ConfigurationType>Application</ConfigurationType>\r
+ <UseOfMfc>false</UseOfMfc>\r
+ <CharacterSet>MultiByte</CharacterSet>\r
+ </PropertyGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />\r
+ <ImportGroup Label="ExtensionSettings">\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ <Import Project="$(VCTargetsPath)Microsoft.CPP.UpgradeFromVC60.props" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ <Import Project="$(VCTargetsPath)Microsoft.CPP.UpgradeFromVC60.props" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ <Import Project="$(VCTargetsPath)Microsoft.CPP.UpgradeFromVC60.props" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ <Import Project="$(VCTargetsPath)Microsoft.CPP.UpgradeFromVC60.props" />\r
+ </ImportGroup>\r
+ <PropertyGroup Label="UserMacros" />\r
+ <PropertyGroup>\r
+ <_ProjectFileVersion>10.0.40219.1</_ProjectFileVersion>\r
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">../../../bin/$(Platform)\$(Configuration)/</OutDir>\r
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">../../../bin/$(Platform)\$(Configuration)/</OutDir>\r
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">.\obj\$(Platform)\$(Configuration)\$(ProjectName)\</IntDir>\r
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">.\obj\$(Platform)\$(Configuration)\$(ProjectName)\</IntDir>\r
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</LinkIncremental>\r
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</LinkIncremental>\r
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">../../../bin/$(Platform)\$(Configuration)/</OutDir>\r
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">../../../bin/$(Platform)\$(Configuration)/</OutDir>\r
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">.\obj\$(Platform)\$(Configuration)\$(ProjectName)\</IntDir>\r
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">.\obj\$(Platform)\$(Configuration)\$(ProjectName)\</IntDir>\r
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">false</LinkIncremental>\r
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</LinkIncremental>\r
+ </PropertyGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">\r
+ <Midl>\r
+ <TypeLibraryName>\r
+ </TypeLibraryName>\r
+ <HeaderFileName>\r
+ </HeaderFileName>\r
+ </Midl>\r
+ <ClCompile>\r
+ <Optimization>Disabled</Optimization>\r
+ <AdditionalIncludeDirectories>C:\works\software\signals\modules\extra_lib\include\SDL2\;../../../include;../../../extra_lib/include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
+ <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_DEPRECATE;_CRT_NONSTDC_NO_DEPRECATE;_SCL_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <MinimalRebuild>true</MinimalRebuild>\r
+ <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>\r
+ <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>\r
+ <PrecompiledHeaderOutputFile>.\obj\mp42ts_deb/$(ProjectName).pch</PrecompiledHeaderOutputFile>\r
+ <AssemblerListingLocation>.\obj\mp42ts_deb/</AssemblerListingLocation>\r
+ <ObjectFileName>.\obj\mp42ts_deb/</ObjectFileName>\r
+ <ProgramDataBaseFileName>.\obj\mp42ts_deb/</ProgramDataBaseFileName>\r
+ <BrowseInformation>true</BrowseInformation>\r
+ <WarningLevel>Level3</WarningLevel>\r
+ <SuppressStartupBanner>true</SuppressStartupBanner>\r
+ <DebugInformationFormat>EditAndContinue</DebugInformationFormat>\r
+ </ClCompile>\r
+ <ResourceCompile>\r
+ <PreprocessorDefinitions>_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <Culture>0x040c</Culture>\r
+ </ResourceCompile>\r
+ <Link>\r
+ <AdditionalDependencies>odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
+ <OutputFile>$(OutDir)$(TargetName)$(TargetExt)</OutputFile>\r
+ <SuppressStartupBanner>true</SuppressStartupBanner>\r
+ <AdditionalLibraryDirectories>C:\works\software\signals\modules\extra_lib\lib/$(Platform)/$(Configuration);../../../extra_lib/lib/$(Platform)/$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <ProgramDatabaseFile>$(IntDir)$(ProjectName).pdb</ProgramDatabaseFile>\r
+ <SubSystem>Console</SubSystem>\r
+ <RandomizedBaseAddress>false</RandomizedBaseAddress>\r
+ <DataExecutionPrevention>\r
+ </DataExecutionPrevention>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ </Link>\r
+ <Bscmake>\r
+ <SuppressStartupBanner>true</SuppressStartupBanner>\r
+ </Bscmake>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">\r
+ <Midl>\r
+ <TypeLibraryName>\r
+ </TypeLibraryName>\r
+ <HeaderFileName>\r
+ </HeaderFileName>\r
+ </Midl>\r
+ <ClCompile>\r
+ <Optimization>Disabled</Optimization>\r
+ <AdditionalIncludeDirectories>C:\works\software\signals\modules\extra_lib\include\SDL2\;../../../include;../../../extra_lib/include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
+ <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_DEPRECATE;_CRT_NONSTDC_NO_DEPRECATE;_SCL_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>\r
+ <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>\r
+ <PrecompiledHeaderOutputFile>.\obj\mp42ts_deb/$(ProjectName).pch</PrecompiledHeaderOutputFile>\r
+ <AssemblerListingLocation>.\obj\mp42ts_deb/</AssemblerListingLocation>\r
+ <ObjectFileName>.\obj\mp42ts_deb/</ObjectFileName>\r
+ <ProgramDataBaseFileName>.\obj\mp42ts_deb/</ProgramDataBaseFileName>\r
+ <BrowseInformation>true</BrowseInformation>\r
+ <WarningLevel>Level3</WarningLevel>\r
+ <SuppressStartupBanner>true</SuppressStartupBanner>\r
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\r
+ </ClCompile>\r
+ <ResourceCompile>\r
+ <PreprocessorDefinitions>_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <Culture>0x040c</Culture>\r
+ </ResourceCompile>\r
+ <Link>\r
+ <AdditionalDependencies>odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
+ <OutputFile>$(OutDir)$(TargetName)$(TargetExt)</OutputFile>\r
+ <SuppressStartupBanner>true</SuppressStartupBanner>\r
+ <AdditionalLibraryDirectories>C:\works\software\signals\modules\extra_lib\lib/$(Platform)/$(Configuration);../../../extra_lib/lib/$(Platform)/$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <ProgramDatabaseFile>$(IntDir)$(ProjectName).pdb</ProgramDatabaseFile>\r
+ <SubSystem>Console</SubSystem>\r
+ <RandomizedBaseAddress>false</RandomizedBaseAddress>\r
+ <DataExecutionPrevention>\r
+ </DataExecutionPrevention>\r
+ </Link>\r
+ <Bscmake>\r
+ <SuppressStartupBanner>true</SuppressStartupBanner>\r
+ </Bscmake>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">\r
+ <Midl>\r
+ <TypeLibraryName>\r
+ </TypeLibraryName>\r
+ <HeaderFileName>\r
+ </HeaderFileName>\r
+ </Midl>\r
+ <ClCompile>\r
+ <Optimization>MaxSpeed</Optimization>\r
+ <InlineFunctionExpansion>OnlyExplicitInline</InlineFunctionExpansion>\r
+ <AdditionalIncludeDirectories>C:\works\software\signals\modules\extra_lib\include\SDL2\;../../../include;../../../extra_lib/include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
+ <PreprocessorDefinitions>WIN32;NDEBUG;_CRT_SECURE_NO_DEPRECATE;_CRT_NONSTDC_NO_DEPRECATE;_SCL_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <StringPooling>true</StringPooling>\r
+ <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>\r
+ <FunctionLevelLinking>true</FunctionLevelLinking>\r
+ <PrecompiledHeaderOutputFile>.\obj\mp42ts_rel/$(ProjectName).pch</PrecompiledHeaderOutputFile>\r
+ <AssemblerListingLocation>.\obj\mp42ts_rel/</AssemblerListingLocation>\r
+ <ObjectFileName>.\obj\mp42ts_rel/</ObjectFileName>\r
+ <ProgramDataBaseFileName>.\obj\mp42ts_rel/</ProgramDataBaseFileName>\r
+ <WarningLevel>Level3</WarningLevel>\r
+ <SuppressStartupBanner>true</SuppressStartupBanner>\r
+ </ClCompile>\r
+ <ResourceCompile>\r
+ <PreprocessorDefinitions>NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <Culture>0x040c</Culture>\r
+ </ResourceCompile>\r
+ <Link>\r
+ <AdditionalDependencies>odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
+ <OutputFile>$(OutDir)$(TargetName)$(TargetExt)</OutputFile>\r
+ <SuppressStartupBanner>true</SuppressStartupBanner>\r
+ <AdditionalLibraryDirectories>C:\works\software\signals\modules\extra_lib\lib/$(Platform)/$(Configuration);../../../extra_lib/lib/$(Platform)/$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\r
+ <ProgramDatabaseFile>$(IntDir)$(ProjectName).pdb</ProgramDatabaseFile>\r
+ <SubSystem>Console</SubSystem>\r
+ <RandomizedBaseAddress>false</RandomizedBaseAddress>\r
+ <DataExecutionPrevention>\r
+ </DataExecutionPrevention>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ </Link>\r
+ <Bscmake>\r
+ <SuppressStartupBanner>true</SuppressStartupBanner>\r
+ </Bscmake>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">\r
+ <Midl>\r
+ <TypeLibraryName>\r
+ </TypeLibraryName>\r
+ <HeaderFileName>\r
+ </HeaderFileName>\r
+ </Midl>\r
+ <ClCompile>\r
+ <Optimization>Full</Optimization>\r
+ <InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>\r
+ <AdditionalIncludeDirectories>../../../include;../../../extra_lib/include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
+ <PreprocessorDefinitions>WIN32;NDEBUG;_CRT_SECURE_NO_DEPRECATE;_CRT_NONSTDC_NO_DEPRECATE;_SCL_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <StringPooling>true</StringPooling>\r
+ <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>\r
+ <FunctionLevelLinking>true</FunctionLevelLinking>\r
+ <PrecompiledHeaderOutputFile>.\obj\mp42ts_rel/$(ProjectName).pch</PrecompiledHeaderOutputFile>\r
+ <AssemblerListingLocation>.\obj\mp42ts_rel/</AssemblerListingLocation>\r
+ <ObjectFileName>.\obj\mp42ts_rel/</ObjectFileName>\r
+ <ProgramDataBaseFileName>.\obj\mp42ts_rel/</ProgramDataBaseFileName>\r
+ <WarningLevel>Level3</WarningLevel>\r
+ <SuppressStartupBanner>true</SuppressStartupBanner>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>\r
+ </ClCompile>\r
+ <ResourceCompile>\r
+ <PreprocessorDefinitions>NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <Culture>0x040c</Culture>\r
+ </ResourceCompile>\r
+ <Link>\r
+ <AdditionalDependencies>odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
+ <OutputFile>$(OutDir)$(TargetName)$(TargetExt)</OutputFile>\r
+ <SuppressStartupBanner>true</SuppressStartupBanner>\r
+ <AdditionalLibraryDirectories>../../../extra_lib/lib/$(Platform)/$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\r
+ <ProgramDatabaseFile>$(IntDir)$(ProjectName).pdb</ProgramDatabaseFile>\r
+ <SubSystem>Console</SubSystem>\r
+ <RandomizedBaseAddress>false</RandomizedBaseAddress>\r
+ <DataExecutionPrevention>\r
+ </DataExecutionPrevention>\r
+ </Link>\r
+ <Bscmake>\r
+ <SuppressStartupBanner>true</SuppressStartupBanner>\r
+ </Bscmake>\r
+ </ItemDefinitionGroup>\r
+ <ItemGroup>\r
+ <ClCompile Include="main.c" />\r
+ </ItemGroup>\r
+ <ItemGroup>\r
+ <ProjectReference Include="..\..\..\build\msvc10\libgpac_dll.vcxproj">\r
+ <Project>{d3540754-e0cf-4604-ac11-82de9bd4d814}</Project>\r
+ </ProjectReference>\r
+ </ItemGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />\r
+ <ImportGroup Label="ExtensionTargets">\r
+ </ImportGroup>\r
+</Project>
\ No newline at end of file
--- /dev/null
+/*\r
+ * GPAC - Multimedia Framework C SDK\r
+ *\r
+ * Authors: Jean Le Feuvre\r
+ * Copyright (c) Telecom ParisTech 2012\r
+ * All rights reserved\r
+ *\r
+ * This file is part of GPAC - sample DASH library usage\r
+ *\r
+ */\r
+\r
+#include "defbench.h"\r
+\r
+\r
+\r
+#if defined(WIN32) && !defined(_WIN32_WCE) && !defined(__GNUC__)\r
+# pragma comment(lib, "libLibOpenHevcWrapper")\r
+#pragma comment(lib, "SDL2")\r
+//#pragma comment(lib, "SDL2main")\r
+#pragma comment(lib, "opengl32")\r
+#endif\r
+\r
+//0: memcpy - 1: memmove - 2: u32 * cast and for loop copy of u32* - 3: memset 0 - 4: not touching the mapped buffer: 5: full memcpy, rely on stride in pixelstorei\r
+#define COPY_TYPE 0\r
+//set to 1 to disable final gltexImage in PBO mode\r
+#define NO_TEX 0\r
+\r
+\r
+SDL_Window *window = NULL;\r
+SDL_GLContext *glctx= NULL;\r
+SDL_Renderer *render= NULL;\r
+GLint txid[3];\r
+u8 *pY = NULL;\r
+u8 *pU = NULL;\r
+u8 *pV = NULL;\r
+u32 width = 0;\r
+u32 height = 0;\r
+u32 size=0;\r
+u32 bpp=8;\r
+u32 Bpp=1;\r
+GLint memory_format=GL_UNSIGNED_BYTE;\r
+GLint pixel_format=GL_LUMINANCE;\r
+GLint texture_type=GL_TEXTURE_RECTANGLE_EXT;\r
+u32 gl_nb_frames = 1;\r
+u32 gl_upload_time = 0;\r
+u32 gl_draw_time = 0;\r
+Bool pbo_mode = GF_TRUE;\r
+Bool first_tx_load = GF_FALSE;\r
+Bool use_vsync=0;\r
+\r
+GLint glsl_program;\r
+GLint vertex_shader;\r
+GLint fragment_shader;\r
+\r
+GLint pbo_Y=0;\r
+GLint pbo_U=0;\r
+GLint pbo_V=0;\r
+\r
+GLDECL_STATIC(glActiveTexture);\r
+GLDECL_STATIC(glClientActiveTexture);\r
+GLDECL_STATIC(glCreateProgram);\r
+GLDECL_STATIC(glDeleteProgram);\r
+GLDECL_STATIC(glLinkProgram);\r
+GLDECL_STATIC(glUseProgram);\r
+GLDECL_STATIC(glCreateShader);\r
+GLDECL_STATIC(glDeleteShader);\r
+GLDECL_STATIC(glShaderSource);\r
+GLDECL_STATIC(glCompileShader);\r
+GLDECL_STATIC(glAttachShader);\r
+GLDECL_STATIC(glDetachShader);\r
+GLDECL_STATIC(glGetShaderiv);\r
+GLDECL_STATIC(glGetInfoLogARB);\r
+GLDECL_STATIC(glGetUniformLocation);\r
+GLDECL_STATIC(glUniform1f);\r
+GLDECL_STATIC(glUniform1i);\r
+GLDECL_STATIC(glGenBuffers);\r
+GLDECL_STATIC(glDeleteBuffers);\r
+GLDECL_STATIC(glBindBuffer);\r
+GLDECL_STATIC(glBufferData);\r
+GLDECL_STATIC(glBufferSubData);\r
+GLDECL_STATIC(glMapBuffer);\r
+GLDECL_STATIC(glUnmapBuffer);\r
+\r
+\r
+static char *glsl_yuv_shader = "\\r
+ #version 140\n\\r
+ #extension GL_ARB_texture_rectangle : enable\n\\r
+ uniform sampler2DRect y_plane;\\r
+ uniform sampler2DRect u_plane;\\r
+ uniform sampler2DRect v_plane;\\r
+ uniform float width;\\r
+ uniform float height;\\r
+ const vec3 offset = vec3(-0.0625, -0.5, -0.5);\\r
+ const vec3 R_mul = vec3(1.164, 0.000, 1.596);\\r
+ const vec3 G_mul = vec3(1.164, -0.391, -0.813);\\r
+ const vec3 B_mul = vec3(1.164, 2.018, 0.000);\\r
+ out vec4 FragColor;\\r
+ void main(void) \\r
+ {\\r
+ vec2 texc;\\r
+ vec3 yuv, rgb;\\r
+ texc = gl_TexCoord[0].st;\\r
+ texc.y = 1.0 - texc.y;\\r
+ texc.x *= width;\\r
+ texc.y *= height;\\r
+ yuv.x = texture2DRect(y_plane, texc).r; \\r
+ texc.x /= 2.0;\\r
+ texc.y /= 2.0;\\r
+ yuv.y = texture2DRect(u_plane, texc).r; \\r
+ yuv.z = texture2DRect(v_plane, texc).r; \\r
+ yuv += offset; \\r
+ rgb.r = dot(yuv, R_mul); \\r
+ rgb.g = dot(yuv, G_mul); \\r
+ rgb.b = dot(yuv, B_mul); \\r
+ FragColor = vec4(rgb, 1.0);\\r
+ }";\r
+\r
+static char *default_glsl_vertex = "\\r
+ varying vec3 gfNormal;\\r
+ varying vec3 gfView;\\r
+ void main(void)\\r
+ {\\r
+ gfView = vec3(gl_ModelViewMatrix * gl_Vertex);\\r
+ gfNormal = normalize(gl_NormalMatrix * gl_Normal);\\r
+ gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;\\r
+ gl_TexCoord[0] = gl_MultiTexCoord0;\\r
+ }";\r
+\r
+\r
+\r
+/*compiles a GLSL shader; returns 1 on success, 0 on failure (logs the compiler output)*/\r
+Bool sdl_compile_shader(u32 shader_id, const char *name, const char *source)\r
+{\r
+	GLint status = GL_FALSE, blen = 0;\r
+	GLsizei slen = 0;\r
+	u32 len;\r
+	if (!source || !shader_id) return 0;\r
+	len = (u32) strlen(source);\r
+	glShaderSource(shader_id, 1, &source, &len);\r
+	glCompileShader(shader_id);\r
+	glGetShaderiv(shader_id, GL_COMPILE_STATUS, &status);	/*check the compile status, not the log length: a non-empty info log may only carry warnings*/\r
+	if (status != GL_TRUE) {\r
+		char *compiler_log;\r
+		glGetShaderiv(shader_id, GL_INFO_LOG_LENGTH, &blen);\r
+		compiler_log = (char*) gf_malloc(blen ? blen : 1);\r
+#ifdef CONFIG_DARWIN_GL\r
+		glGetInfoLogARB((GLhandleARB) shader_id, blen, &slen, compiler_log);\r
+#else\r
+		glGetInfoLogARB(shader_id, blen, &slen, compiler_log);\r
+#endif\r
+		GF_LOG(GF_LOG_ERROR, GF_LOG_COMPOSE, ("[GLSL] Failed to compile shader %s: %s\n", name, compiler_log));\r
+		gf_free(compiler_log); return 0;\r
+	}\r
+	return 1;\r
+}\r
+\r
+void sdl_init(u32 _width, u32 _height, u32 _bpp, u32 stride, Bool use_pbo)	/*creates the SDL window + GL context, allocates the YUV planes, builds the YUV->RGB shader and (optionally) the PBOs*/\r
+{\r
+	u32 i, flags;\r
+	Float hw, hh;\r
+	GLint loc;\r
+	GF_Matrix mx;\r
+	width = _width;\r
+	height = _height;\r
+	bpp = _bpp;\r
+\r
+\r
+	SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);\r
+	SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 16);\r
+	SDL_GL_SetAttribute(SDL_GL_STENCIL_SIZE, 0);\r
+	SDL_GL_SetAttribute(SDL_GL_RED_SIZE, 8);\r
+	SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE, 8);\r
+	SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, 8);\r
+\r
+	flags = SDL_WINDOW_OPENGL | SDL_WINDOW_RESIZABLE | SDL_WINDOW_BORDERLESS | SDL_WINDOW_MAXIMIZED;\r
+	if (use_vsync) flags |= SDL_RENDERER_PRESENTVSYNC;	/*NOTE(review): SDL_RENDERER_PRESENTVSYNC is a renderer-creation flag, not a window flag — confirm this has the intended effect here*/\r
+	window = SDL_CreateWindow("", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, width, height, flags);\r
+	glctx = SDL_GL_CreateContext(window);\r
+	SDL_GL_MakeCurrent(window, glctx);\r
+\r
+	render = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED);\r
+\r
+\r
+#if (COPY_TYPE==5) \r
+	size = stride*height;\r
+#else\r
+	size = width*height;\r
+#endif\r
+	if (bpp>8) {\r
+		size *= 2;\r
+		Bpp = 2;\r
+	}\r
+	pY = gf_malloc(size*sizeof(u8));	/*Y plane; U and V below are quarter size (4:2:0)*/\r
+	memset(pY, 0x80, size*sizeof(u8));\r
+	pU = gf_malloc(size/4*sizeof(u8));\r
+	memset(pU, 0, size/4*sizeof(u8));\r
+	pV = gf_malloc(size/4*sizeof(u8));\r
+	memset(pV, 0, size/4*sizeof(u8));\r
+\r
+	glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT);\r
+	glViewport(0, 0, width, height);\r
+\r
+	gf_mx_init(mx);\r
+	hw = ((Float)width)/2;\r
+	hh = ((Float)height)/2;\r
+	gf_mx_ortho(&mx, -hw, hw, -hh, hh, 50, -50);	/*pixel-centered orthographic projection: the quad drawn in sdl_draw_quad spans [-hw,hw]x[-hh,hh]*/\r
+	glMatrixMode(GL_PROJECTION);\r
+	glLoadMatrixf(mx.m);\r
+\r
+\r
+	glMatrixMode(GL_TEXTURE);\r
+	glLoadIdentity();\r
+\r
+	glMatrixMode(GL_MODELVIEW);\r
+	glLoadIdentity();\r
+\r
+	glClear(GL_DEPTH_BUFFER_BIT);\r
+	glDisable(GL_NORMALIZE);\r
+	glDisable(GL_DEPTH_TEST);\r
+	glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_FASTEST);\r
+	glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_FASTEST);\r
+	glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);\r
+	glDisable(GL_LINE_SMOOTH);\r
+	glDisable(GL_LINE_SMOOTH);\r
+	glDisable(GL_LIGHTING);\r
+	glDisable(GL_BLEND);\r
+	glDisable(GL_TEXTURE_2D);\r
+	glDisable(GL_CULL_FACE);\r
+\r
+\r
+	GET_GLFUN(glActiveTexture);\r
+	GET_GLFUN(glClientActiveTexture);\r
+	GET_GLFUN(glCreateProgram);\r
+	GET_GLFUN(glDeleteProgram);\r
+	GET_GLFUN(glLinkProgram);\r
+	GET_GLFUN(glUseProgram);\r
+	GET_GLFUN(glCreateShader);\r
+	GET_GLFUN(glDeleteShader);\r
+	GET_GLFUN(glShaderSource);\r
+	GET_GLFUN(glCompileShader);\r
+	GET_GLFUN(glAttachShader);\r
+	GET_GLFUN(glDetachShader);\r
+	GET_GLFUN(glGetShaderiv);\r
+	GET_GLFUN(glGetInfoLogARB);\r
+	GET_GLFUN(glGetUniformLocation);\r
+	GET_GLFUN(glUniform1f);\r
+	GET_GLFUN(glUniform1i);\r
+	GET_GLFUN(glGenBuffers);\r
+	GET_GLFUN(glDeleteBuffers);\r
+	GET_GLFUN(glBindBuffer);\r
+	GET_GLFUN(glBufferData);\r
+	GET_GLFUN(glBufferSubData);\r
+	GET_GLFUN(glMapBuffer);\r
+	GET_GLFUN(glUnmapBuffer);\r
+\r
+	glsl_program = glCreateProgram();\r
+	vertex_shader = glCreateShader(GL_VERTEX_SHADER);\r
+	sdl_compile_shader(vertex_shader, "vertex", default_glsl_vertex);\r
+\r
+	fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);\r
+	sdl_compile_shader(fragment_shader, "fragment", glsl_yuv_shader);\r
+\r
+	glAttachShader(glsl_program, vertex_shader);\r
+	glAttachShader(glsl_program, fragment_shader);\r
+	glLinkProgram(glsl_program); \r
+\r
+	glGenTextures(3, txid);\r
+	for (i=0; i<3; i++) {\r
+\r
+		glEnable(texture_type);\r
+		glBindTexture(texture_type, txid[i] );\r
+		glPixelStorei(GL_UNPACK_ALIGNMENT, 1);\r
+		if (bpp>8) {\r
+			glPixelTransferi(GL_RED_SCALE, 64);	/*presumably scales 10-bit samples stored in 16-bit words up to full range — TODO confirm*/\r
+			memory_format=GL_UNSIGNED_SHORT;\r
+		}\r
+		glTexParameteri(texture_type, GL_TEXTURE_WRAP_S, GL_CLAMP);\r
+		glTexParameteri(texture_type, GL_TEXTURE_WRAP_T, GL_CLAMP);\r
+		glTexParameteri(texture_type, GL_TEXTURE_MAG_FILTER, GL_LINEAR);\r
+		glTexParameteri(texture_type, GL_TEXTURE_MIN_FILTER, GL_LINEAR);\r
+	\r
+		if (bpp>8) {\r
+			glPixelStorei(GL_UNPACK_ALIGNMENT, 2);\r
+		} else {\r
+			glPixelStorei(GL_UNPACK_ALIGNMENT, 1);\r
+		}\r
+		glDisable(texture_type);\r
+	}\r
+\r
+	//sets uniforms: y, u, v textures point to texture slots 0, 1 and 2\r
+	glUseProgram(glsl_program); \r
+	for (i=0; i<3; i++) {\r
+		const char *txname = (i==0) ? "y_plane" : (i==1) ? "u_plane" : "v_plane";\r
+		loc = glGetUniformLocation(glsl_program, txname);\r
+		if (loc == -1) {\r
+			GF_LOG(GF_LOG_ERROR, GF_LOG_COMPOSE, ("[Compositor] Failed to locate texture %s in YUV shader\n", txname));\r
+			continue;\r
+		}\r
+		glUniform1i(loc, i);\r
+	}\r
+	loc = glGetUniformLocation(glsl_program, "width");\r
+	if (loc>= 0) {\r
+		Float w = (Float) width;\r
+		glUniform1f(loc, w);\r
+	}\r
+	loc = glGetUniformLocation(glsl_program, "height");\r
+	if (loc>= 0) {\r
+		Float h = (Float) height;\r
+		glUniform1f(loc, h);\r
+	}\r
+\r
+	glUseProgram(0); \r
+\r
+\r
+	if (glMapBuffer==NULL) use_pbo = GF_FALSE;	/*PBO entry points not available on this GL: fall back to direct texture upload*/\r
+\r
+\r
+	pbo_mode = use_pbo;\r
+	first_tx_load = use_pbo ? GF_FALSE : GF_TRUE;\r
+	if (use_pbo) {\r
+		glGenBuffers(1, &pbo_Y);\r
+		glGenBuffers(1, &pbo_U);\r
+		glGenBuffers(1, &pbo_V);\r
+\r
+		glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo_Y);\r
+		glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, size, NULL, GL_DYNAMIC_DRAW_ARB);\r
+\r
+		glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo_U);\r
+		glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, size/4, NULL, GL_DYNAMIC_DRAW_ARB);\r
+\r
+		glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo_V);\r
+		glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, size/4, NULL, GL_DYNAMIC_DRAW_ARB);\r
+\r
+		glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);\r
+	}\r
+}\r
+\r
+void sdl_close()	/*releases GL objects, plane buffers and SDL window/renderer/context*/\r
+{\r
+	DEL_SHADER(vertex_shader);\r
+	DEL_SHADER(fragment_shader);\r
+	DEL_PROGRAM(glsl_program );\r
+\r
+	if (pbo_mode && pbo_Y) {\r
+		glDeleteBuffers(1, &pbo_Y);\r
+		glDeleteBuffers(1, &pbo_U);\r
+		glDeleteBuffers(1, &pbo_V);\r
+	}\r
+\r
+	if (pY) { gf_free(pY); pY = NULL; }	/*NULL the globals so a second sdl_close() is harmless*/\r
+	if (pU) { gf_free(pU); pU = NULL; }\r
+	if (pV) { gf_free(pV); pV = NULL; }\r
+\r
+	if (render) SDL_DestroyRenderer(render);	/*destroy the renderer BEFORE the GL context it may hold resources in*/\r
+	if (glctx) SDL_GL_DeleteContext(glctx);\r
+	if (window) SDL_DestroyWindow(window);\r
+}\r
+\r
+void sdl_draw_quad()	/*draws the full-viewport textured quad; in immediate mode glTexCoord2f applies to the FOLLOWING vertex, so each coord is issued before its vertex*/\r
+{\r
+	Float w = ((Float)width)/2;\r
+	Float h = ((Float)height)/2;\r
+\r
+	glBegin(GL_QUADS);\r
+\r
+	glTexCoord2f(1, 1);\r
+	glVertex3f(w, h, 0);\r
+\r
+	glTexCoord2f(1, 0);\r
+	glVertex3f(w, -h, 0);\r
+\r
+	glTexCoord2f(0, 0);\r
+	glVertex3f(-w, -h, 0);\r
+\r
+	glTexCoord2f(0, 1);\r
+	glVertex3f(-w, h, 0);\r
+\r
+	glEnd();\r
+}\r
+\r
+\r
+void sdl_draw_frame(u8 *pY, u8 *pU, u8 *pV, u32 w, u32 h, u32 bit_depth, u32 stride)	/*uploads one YUV 4:2:0 frame to the 3 textures (first-load, PBO or glTexSubImage path) and draws it through the YUV->RGB shader*/\r
+{\r
+	u32 needs_stride = 0;	/*non-zero when the decoder pitch differs from the drawn width — assumed to match GL_UNPACK_ROW_LENGTH units, TODO confirm for 10-bit*/\r
+	u32 now, end;\r
+\r
+	if (stride != w) {\r
+		if (bit_depth==10) {\r
+			if (stride != 2*w) {\r
+				needs_stride = stride;\r
+			}\r
+		} else {\r
+			needs_stride = stride;\r
+		}\r
+	}\r
+\r
+	glEnable(texture_type);\r
+\r
+	now = gf_sys_clock();\r
+\r
+\r
+	if (first_tx_load) {\r
+		glBindTexture(texture_type, txid[0] );\r
+		if (needs_stride) glPixelStorei(GL_UNPACK_ROW_LENGTH, needs_stride);\r
+		glTexImage2D(texture_type, 0, 1, w, h, 0, pixel_format, memory_format, pY);	/*internal format 1 == legacy single-component (luminance)*/\r
+\r
+		glBindTexture(texture_type, txid[1] );\r
+		if (needs_stride) glPixelStorei(GL_UNPACK_ROW_LENGTH, needs_stride/2);\r
+		glTexImage2D(texture_type, 0, 1, w/2, h/2, 0, pixel_format, memory_format, pU);\r
+\r
+		glBindTexture(texture_type, txid[2] );\r
+		if (needs_stride) glPixelStorei(GL_UNPACK_ROW_LENGTH, needs_stride/2);\r
+		glTexImage2D(texture_type, 0, 1, w/2, h/2, 0, pixel_format, memory_format, pV);\r
+\r
+		if (needs_stride) glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);\r
+		first_tx_load = GF_FALSE;\r
+	} else if (pbo_mode) {\r
+		u32 i, linesize, count, p_stride;\r
+		u8 *ptr;\r
+#if (COPY_TYPE==2)\r
+		u32 *s, *d;\r
+		u32 j, c2;\r
+#endif\r
+\r
+		glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);\r
+\r
+		glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo_Y);\r
+		ptr =(u8 *)glMapBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, GL_WRITE_ONLY_ARB);\r
+#if (COPY_TYPE==5)\r
+		memcpy(ptr, pY, size);\r
+#elif (COPY_TYPE==3)\r
+		memset(ptr, 0x80, size);\r
+#elif (COPY_TYPE==4)\r
+#else\r
+		linesize = width*Bpp;	/*NOTE(review): uses global width, not the w parameter — confirm they always match for non-bench callers*/\r
+		p_stride = stride*Bpp;\r
+		count = h;\r
+#if (COPY_TYPE==2)\r
+		c2 = linesize/4;\r
+		s = (u32 *)pY;\r
+		d = (u32 *)ptr;\r
+#endif\r
+		for (i=0; i<count; i++) {\r
+#if (COPY_TYPE==0) || (COPY_TYPE==1)\r
+#if (COPY_TYPE==0)\r
+			memcpy(ptr, pY, linesize);\r
+#else\r
+			memmove(ptr, pY, linesize);\r
+#endif\r
+			pY+= p_stride;\r
+			ptr += linesize;\r
+#else\r
+			for (j=0; j<linesize/4; j++) {\r
+				*d++ = *s++;;\r
+			}\r
+			s+= (p_stride-linesize)/4;\r
+#endif\r
+		}\r
+#endif\r
+		glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER_ARB);\r
+\r
+		glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo_U);\r
+		ptr =(u8 *)glMapBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, GL_WRITE_ONLY_ARB);\r
+#if (COPY_TYPE==5)\r
+		memcpy(ptr, pU, size/4);\r
+#elif (COPY_TYPE==3)\r
+		memset(ptr, 0x80, size/4);\r
+#elif (COPY_TYPE==4)\r
+#else\r
+		linesize = width*Bpp/2;\r
+		p_stride = stride*Bpp/2;\r
+		count/=2;\r
+#if (COPY_TYPE==2)\r
+		c2 /= 2;\r
+		s = (u32 *)pU;\r
+		d = (u32 *)ptr;\r
+#endif\r
+		for (i=0; i<count; i++) {\r
+#if (COPY_TYPE==0) || (COPY_TYPE==1)\r
+#if (COPY_TYPE==0)\r
+			memcpy(ptr, pU, linesize);\r
+#else\r
+			memmove(ptr, pU, linesize);\r
+#endif\r
+			pU+= p_stride;\r
+			ptr += linesize;\r
+#else\r
+			for (j=0; j<linesize/4; j++) {\r
+				*d++ = *s++;;\r
+			}\r
+			s+= (p_stride-linesize)/4;\r
+#endif\r
+		}\r
+#endif\r
+		glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER_ARB);\r
+\r
+		glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo_V);\r
+		ptr =(u8 *)glMapBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, GL_WRITE_ONLY_ARB);\r
+#if (COPY_TYPE==5)\r
+		memcpy(ptr, pV, size/4);\r
+#elif (COPY_TYPE==3)\r
+		memset(ptr, 0x80, size/4);\r
+#elif (COPY_TYPE==4)\r
+#else\r
+#if (COPY_TYPE==2)\r
+		s = (u32 *)pV;\r
+		d = (u32 *)ptr;\r
+#endif\r
+		for (i=0; i<count; i++) {\r
+#if (COPY_TYPE==0) || (COPY_TYPE==1)\r
+#if (COPY_TYPE==0)\r
+			memcpy(ptr, pV, linesize);\r
+#else\r
+			memmove(ptr, pV, linesize);\r
+#endif\r
+			pV+= p_stride;\r
+			ptr += linesize;\r
+#else\r
+			for (j=0; j<linesize/4; j++) {\r
+				*d++ = *s++;;\r
+			}\r
+			s+= (p_stride-linesize)/4;\r
+#endif\r
+		}\r
+#endif\r
+		glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER_ARB);\r
+\r
+#if (COPY_TYPE!=5) \r
+		needs_stride=0;	/*the row-by-row copy above already removed the stride*/\r
+#endif\r
+	\r
+\r
+#if (NO_TEX==0)\r
+		glBindTexture(texture_type, txid[0] );\r
+		glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo_Y);\r
+		if (needs_stride) glPixelStorei(GL_UNPACK_ROW_LENGTH, needs_stride);\r
+		glTexImage2D(texture_type, 0, 1, w, h, 0, pixel_format, memory_format, NULL);\r
+		//glTexSubImage2D crashes with PBO and 2-bytes luminance on my FirePro W5000 ...\r
+//		glTexSubImage2D(texture_type, 0, 0, 0, w, h, pixel_format, memory_format, pY);\r
+\r
+		glBindTexture(texture_type, txid[1] );\r
+		glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo_U);\r
+		if (needs_stride) glPixelStorei(GL_UNPACK_ROW_LENGTH, needs_stride/2);\r
+		glTexImage2D(texture_type, 0, 1, w/2, h/2, 0, pixel_format, memory_format, NULL);\r
+//		glTexSubImage2D(texture_type, 0, 0, 0, w/2, h/2, pixel_format, memory_format, pU);\r
+\r
+		glBindTexture(texture_type, txid[2] );\r
+		glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo_V);\r
+		if (needs_stride) glPixelStorei(GL_UNPACK_ROW_LENGTH, needs_stride/2);\r
+		glTexImage2D(texture_type, 0, 1, w/2, h/2, 0, pixel_format, memory_format, NULL);\r
+//		glTexSubImage2D(texture_type, 0, 0, 0, w/2, h/2, pixel_format, memory_format, pV);\r
+#endif\r
+\r
+		glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);\r
+		if (needs_stride) glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);\r
+	} else {\r
+		glBindTexture(texture_type, txid[0] );\r
+		if (needs_stride) glPixelStorei(GL_UNPACK_ROW_LENGTH, needs_stride);\r
+		glTexSubImage2D(texture_type, 0, 0, 0, w, h, pixel_format, memory_format, pY);\r
+		glBindTexture(texture_type, 0);\r
+\r
+		glBindTexture(texture_type, txid[1] );\r
+		if (needs_stride) glPixelStorei(GL_UNPACK_ROW_LENGTH, needs_stride/2);\r
+		glTexSubImage2D(texture_type, 0, 0, 0, w/2, h/2, pixel_format, memory_format, pU);\r
+		glBindTexture(texture_type, 0);\r
+\r
+		glBindTexture(texture_type, txid[2] );\r
+		if (needs_stride) glPixelStorei(GL_UNPACK_ROW_LENGTH, needs_stride/2);\r
+		glTexSubImage2D(texture_type, 0, 0, 0, w/2, h/2, pixel_format, memory_format, pV);\r
+		glBindTexture(texture_type, 0);\r
+\r
+		if (needs_stride) glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);\r
+	}\r
+	end = gf_sys_clock() - now;\r
+\r
+	if (!first_tx_load) {\r
+		gl_nb_frames ++;\r
+		gl_upload_time += end;\r
+	}\r
+\r
+	glUseProgram(glsl_program);\r
+\r
+	glActiveTexture(GL_TEXTURE2);\r
+	glBindTexture(texture_type, txid[2]);\r
+\r
+	glActiveTexture(GL_TEXTURE1);\r
+	glBindTexture(texture_type, txid[1]);\r
+\r
+	glActiveTexture(GL_TEXTURE0 );\r
+	glBindTexture(texture_type, txid[0]);\r
+	\r
+	glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);\r
+	glClientActiveTexture(GL_TEXTURE0);\r
+\r
+	sdl_draw_quad();\r
+\r
+	glDisable(texture_type);\r
+	glUseProgram(0);\r
+\r
+	glActiveTexture(GL_TEXTURE0);\r
+	glBindTexture(texture_type, 0);\r
+\r
+	SDL_GL_SwapWindow(window);\r
+\r
+	gl_draw_time += gf_sys_clock() - now;\r
+	return;\r
+}\r
+\r
+\r
+void sdl_bench()	/*pushes 600 static YUV frames through the upload path and reports FPS and data rate*/\r
+{\r
+	Double rate;\r
+	u32 i, count;\r
+	u32 start = gf_sys_clock();\r
+\r
+	count = 600;\r
+	for (i=0; i<count; i++) {\r
+		sdl_draw_frame(pY, pU, pV, width, height, 8, width);\r
+	}\r
+\r
+	start = gf_sys_clock() - start; if (!start) start = 1;	/*avoid divide by zero on very fast runs*/\r
+	rate = 3*size/2;	/*bytes pushed per frame (YUV 4:2:0)*/\r
+	rate /= start; //in ms\r
+	rate /= 1000; //==*1000 (in s) / 1000 * 1000 in MB /s\r
+	fprintf(stdout, "gltext pushed %d frames in %d ms - FPS %g - data rate %g MB/s\n", count, start, 1000.0*count/start, rate);\r
+}\r
+\r
+void PrintUsage()	/*prints command-line help to stderr; keep in sync with the option parsing in main()*/\r
+{\r
+	fprintf(stderr, "USAGE: [OPTS] file.mp4\n"\r
+	        "\n"\r
+	        "Options:\n"\r
+	        "-bench-yuv: only bench YUV upload rate\n"\r
+	        "-sys-mem: uses copy from decoder mem to system mem before upload (removes stride)\n"\r
+	        "-use-pbo: uses PixelBufferObject for texture transfer\n-vsync: enables vertical synchronization\n"\r
+	        "-no-display: disables video output\n-mem-track: enables GPAC memory tracking\n"\r
+	        "-nb-threads=N: sets number of decoder threads to N (default N=6)\n"\r
+	        "-mode=[frame|wpp|frame+wpp] : sets threading type (default is frame)\n"\r
+	        );\r
+}\r
+\r
+int main(int argc, char **argv)	/*decodes the first HEVC track of an ISO file with OpenHEVC and displays it through SDL/GL; see PrintUsage()*/\r
+{\r
+	Bool sdl_bench_yuv = GF_FALSE;\r
+	Bool no_display = GF_FALSE;\r
+	u32 start, now, check_prompt;\r
+	Bool sdl_is_init=GF_FALSE, run;\r
+	Bool paused = GF_FALSE;\r
+	u32 pause_time = 0;\r
+	GF_ISOFile *isom;\r
+	u32 i, count, track = 0;\r
+	u32 nb_frames_at_start = 0;\r
+	GF_ESD *esd;\r
+	u32 nb_threads = 6;\r
+	u32 mode = 1;\r
+	Bool use_raw_memory = GF_TRUE;\r
+	OpenHevc_Handle ohevc;\r
+	Bool use_pbo = GF_FALSE;\r
+	Bool enable_mem_tracker = GF_FALSE;\r
+	const char *src = NULL;\r
+\r
+	if (argc<2) {\r
+		PrintUsage();\r
+		return 0;\r
+	}\r
+\r
+	for (i=1; i<(u32)argc; i++) {	/*start at 1: argv[0] is the program path and would otherwise be taken as the source file*/\r
+		char *arg = argv[i];\r
+		if (arg[0]!='-') {\r
+			src = arg;\r
+			continue;\r
+		}\r
+		if (!strcmp(arg, "-bench-yuv")) sdl_bench_yuv=1;\r
+		else if (!strcmp(arg, "-sys-mem")) use_raw_memory = 0;\r
+		else if (!strcmp(arg, "-vsync")) use_vsync = 1;\r
+		else if (!strcmp(arg, "-use-pbo")) use_pbo = 1;\r
+		else if (!strcmp(arg, "-no-display")) no_display = 1;\r
+		else if (!strcmp(arg, "-mem-track")) enable_mem_tracker = GF_TRUE;\r
+		else if (!strncmp(arg, "-nb-threads=", 12)) nb_threads = atoi(arg+12);\r
+		else if (!strncmp(arg, "-mode=", 6)) {\r
+			if (!strcmp(arg+6, "wpp")) mode = 2;\r
+			else if (!strcmp(arg+6, "frame+wpp")) mode = 3;\r
+			else mode = 1;\r
+		}\r
+\r
+		else if (!strcmp(arg, "-h")) {\r
+			PrintUsage();\r
+			return 0;\r
+		}\r
+	}\r
+\r
+	\r
+\r
+	/*****************/\r
+	/*   gpac init   */\r
+	/*****************/\r
+#ifdef GPAC_MEMORY_TRACKING\r
+	gf_sys_init(enable_mem_tracker);\r
+#else\r
+	gf_sys_init(GF_FALSE);\r
+#endif\r
+	gf_log_set_tool_level(GF_LOG_ALL, GF_LOG_WARNING);\r
+\r
+	if (sdl_bench_yuv) {\r
+		sdl_init(3840, 2160, 8, 3840, use_pbo);\r
+		sdl_bench();\r
+		sdl_close();\r
+		gf_sys_close();\r
+		return 0;\r
+	}\r
+	if (!src) {\r
+		PrintUsage();\r
+		gf_sys_close();\r
+		return 0;\r
+	}\r
+\r
+\r
+	isom = gf_isom_open(src, GF_ISOM_OPEN_READ, NULL);\r
+	if (!isom) {\r
+		sdl_close();\r
+		gf_sys_close();\r
+		return 0;\r
+	}\r
+\r
+	for (i=0; i<gf_isom_get_track_count(isom); i++) {\r
+		if (gf_isom_get_hevc_shvc_type(isom, i+1, 1)>=GF_ISOM_HEVCTYPE_HEVC_ONLY) {\r
+			track = i+1;\r
+			break;\r
+		}\r
+	}\r
+\r
+	if (!track) {\r
+		gf_isom_close(isom);\r
+		sdl_close();\r
+		gf_sys_close();\r
+		return 0;\r
+	}\r
+\r
+	count = gf_isom_get_sample_count(isom, track);\r
+	start = gf_sys_clock();\r
+\r
+	esd = gf_isom_get_esd(isom, track, 1);\r
+	ohevc = libOpenHevcInit(nb_threads, mode);\r
+	if (esd && esd->decoderConfig && esd->decoderConfig->decoderSpecificInfo && esd->decoderConfig->decoderSpecificInfo->data) {	/*esd may be NULL for hvc1/hev1 tracks*/\r
+		libOpenHevcCopyExtraData(ohevc, esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength+8);	/*NOTE(review): +8 presumably mirrors the 8-byte sample padding set below — confirm*/\r
+	}\r
+	libOpenHevcStartDecoder(ohevc);\r
+	if (esd) gf_odf_desc_del((GF_Descriptor *)esd);\r
+	gf_isom_set_sample_padding(isom, track, 8);\r
+\r
+	run=1;\r
+	check_prompt=0;\r
+	for (i=0; i<count && run; i++) {\r
+		u32 di;\r
+		if (!paused) {\r
+			GF_ISOSample *sample = gf_isom_get_sample(isom, track, i+1, &di);\r
+\r
+			if ( sample && libOpenHevcDecode(ohevc, sample->data, sample->dataLength, sample->DTS+sample->CTS_Offset) ) {\r
+				OpenHevc_Frame_cpy HVCFrame;\r
+\r
+				libOpenHevcGetPictureInfo(ohevc, &HVCFrame.frameInfo);\r
+				if (!sdl_is_init && !no_display) {\r
+					sdl_init(HVCFrame.frameInfo.nWidth, HVCFrame.frameInfo.nHeight, HVCFrame.frameInfo.nBitDepth, HVCFrame.frameInfo.nYPitch+32, use_pbo);\r
+					sdl_is_init=1;\r
+					start = gf_sys_clock();\r
+					nb_frames_at_start = i+1;\r
+				}\r
+\r
+				if (no_display) {\r
+					OpenHevc_Frame HVCFrame_ptr;\r
+					libOpenHevcGetOutput(ohevc, 1, &HVCFrame_ptr);\r
+				} else if (use_raw_memory) {\r
+					OpenHevc_Frame HVCFrame_ptr;\r
+					libOpenHevcGetOutput(ohevc, 1, &HVCFrame_ptr);\r
+\r
+					sdl_draw_frame((u8 *) HVCFrame_ptr.pvY, (u8 *) HVCFrame_ptr.pvU, (u8 *) HVCFrame_ptr.pvV, HVCFrame.frameInfo.nWidth, HVCFrame.frameInfo.nHeight, HVCFrame.frameInfo.nBitDepth, HVCFrame.frameInfo.nYPitch+32);\r
+				} else {\r
+					memset(&HVCFrame, 0, sizeof(OpenHevc_Frame) );\r
+					HVCFrame.pvY = (void*) pY;\r
+					HVCFrame.pvU = (void*) pU;\r
+					HVCFrame.pvV = (void*) pV;\r
+					libOpenHevcGetOutputCpy(ohevc, 1, &HVCFrame);\r
+					sdl_draw_frame(pY, pU, pV, HVCFrame.frameInfo.nWidth, HVCFrame.frameInfo.nHeight, HVCFrame.frameInfo.nBitDepth, HVCFrame.frameInfo.nYPitch);\r
+				}\r
+			}\r
+\r
+			if (sample) gf_isom_sample_del(&sample);\r
+\r
+			now = gf_sys_clock();\r
+			fprintf(stderr, "%d %% %d frames in %d ms - FPS %02.2g - push time %d ms - draw %d ms\r", 100*(i+1-nb_frames_at_start)/count, i+1-nb_frames_at_start, now-start, 1000.0 * (i+1-nb_frames_at_start) / (now-start), gl_upload_time / gl_nb_frames , (gl_draw_time - gl_upload_time) / gl_nb_frames );\r
+		} else {\r
+			gf_sleep(10);\r
+			i--;\r
+		}\r
+		check_prompt++;\r
+		if (check_prompt==50) {\r
+			if (gf_prompt_has_input()) {\r
+				switch (gf_prompt_get_char()) {\r
+				case 'q':\r
+					run = 0;\r
+					break;\r
+				case 'm':\r
+					use_raw_memory = !use_raw_memory;\r
+					break;\r
+				case 'p':\r
+					if (paused) {\r
+						paused=0;\r
+						start += gf_sys_clock()-pause_time;\r
+					} else {\r
+						paused = 1;\r
+						pause_time=gf_sys_clock();\r
+					}\r
+					break;\r
+				case 'r':\r
+					start = gf_sys_clock();\r
+					nb_frames_at_start = i+1;\r
+					gl_upload_time = gl_draw_time = 0;\r
+					gl_nb_frames=1;\r
+					break;\r
+\r
+				}\r
+			}\r
+			check_prompt=0;\r
+		}\r
+	}\r
+	now = gf_sys_clock(); if (now==start) now++;	/*avoid divide by zero when everything fits in one clock tick*/\r
+	fprintf(stderr, "Decoded %d frames in %d ms - FPS %g\n", i+1, now-start, 1000.0 * (i+1) / (now-start) );\r
+\r
+	libOpenHevcClose(ohevc);\r
+	gf_isom_close(isom);\r
+\r
+	if (!no_display)\r
+		sdl_close();\r
+\r
+	gf_sys_close();\r
+	return 1;	/*NOTE(review): returns 1 on success — 0 would be conventional, but scripts may rely on this*/\r
+}\r
+\r
SunOS)
canon_arch=`isainfo -n`
;;
+ Darwin)
+ canon_arch="x86_64"
+ ;;
*)
canon_arch="`$cc -dumpmachine | sed -e 's,\([^-]*\)-.*,\1,'`"
;;
if [ -z "`echo $CFLAGS | grep -- -m32`" ]; then
cpu="x86_64"
want_pic="yes"
+ is_64="yes"
fi
fi
;;
GPAC_SH_FLAGS=""
strip="strip -x"
if test "$is_64" = "yes" ; then
- LDFLAGS="$LDFLAGS -read_only_relocs warning"
+ LDFLAGS="$LDFLAGS"
fi
darwin="yes"
gcc_version=`$cc -v 2>&1 | grep version | cut -d ' ' -f3`
cc="$cc $CFLAGS"
cxx="$cxx $CXXFLAGS"
+#look for zlib
+cat > $TMPC << EOF
+#include <emmintrin.h>
+int main( void ) { }
+EOF
+
+if $cc -o $TMPO $TMPC -msse2 $LDFLAGS 2> /dev/null ; then
+ CFLAGS="$CFLAGS -msse2"
+fi
+
#look for zlib
cat > $TMPC << EOF
else
js_flags="-DUSE_FFDEV_14 $js_flags"
fi
- else
-cat > $TMPC << EOF
-#include <jsapi.h>
-int main( void ) { JSContext *cx; JS_SetContextThread(cx); }
-EOF
- if $cc -o $TMPO $TMPC $js_flags $LDFLAGS $js_lib 2> /dev/null ; then
-cat > $TMPC << EOF
-#include <jsapi.h>
-int main( void ) { JSContext *cx; JS_SetRuntimeThread(cx); }
-EOF
- if $cc -o $TMPO $TMPC $js_flags $LDFLAGS $js_lib 2> /dev/null ; then
- js_flags="$js_flags"
- else
- js_flags="-DNO_JS_RUNTIMETHREAD $js_flags"
- fi
- else
- js_flags="-DUSE_FFDEV_11 $js_flags"
- fi
fi
fi
fi
fi
fi
-#fi
+
+
+if test "$has_js" != "no" ; then
+#probe for JS_SetRuntimeThread (API changed across SpiderMonkey releases)
+cat > $TMPC << EOF
+#include <jsapi.h>
+int main( void ) { JSContext *cx; JS_SetRuntimeThread(cx); }
+EOF
+	if $cc -o $TMPO $TMPC $js_flags $LDFLAGS $js_lib 2> /dev/null ; then
+		js_flags="$js_flags"
+	else
+		js_flags="-DNO_JS_RUNTIMETHREAD $js_flags"
+	fi
+fi
#end JS test
echo "#define GPAC_HAS_IPV6" >> $TMPH
fi
+if test "$is_64" = "yes" ; then
+ echo "#define GPAC_64_BITS" >> $TMPH
+fi
+
if test "$win32" = "yes" ; then
echo "CONFIG_DIRECTX=$has_mingw_directx" >> config.mak
if test "$has_mingw_directx" = "yes" ; then
fi
if test "$is_64" = "yes"; then
+#not on OSX ...
+if test "$darwin" = "yes"; then
+ echo "X11_LIB_PATH=$X11_PATH/lib" >> config.mak
+else
echo "X11_LIB_PATH=$X11_PATH/lib64" >> config.mak
+fi
else
echo "X11_LIB_PATH=$X11_PATH/lib" >> config.mak
fi
<br/>\r
<b><span style="Font-Size: 24pt">GPAC Configuration file documentation<br/>Version 0.5.0</span></b>\r
<br/>\r
-<b><span style="Font-Size: 18pt">Last Modified $LastChangedDate: 2014-02-20 18:47:55 +0000 (Do, 20. Feb 2014) $</span></b>\r
+<b><span style="Font-Size: 18pt">Last Modified $LastChangedDate: 2014-03-27 06:31:20 -0400 (Do, 27. Mär 2014) $</span></b>\r
</p>\r
\r
<br/><br/>\r
</p>\r
<b>DrawLateFrames</b> [value: <i>"yes" "no"</i>]\r
<p style="text-indent: 5%">\r
-If set, late frames will still be drawn. If not set, the late frames are droped (or executed for systems decoders) untill the decoder output is back in sync. This is by default off to keep better sync, but may be usefull when testing heavy content or slow renderers.\r
+If set, late frames will still be drawn. If not set, the late frames are dropped (or executed for systems decoders) until the decoder output is back in sync. This is on by default, which helps when testing heavy content or slow renderers, but should be set to off when better sync or monitoring of skipped frames is needed.\r
</p>\r
<b>ForceSingleClock</b> [value: <i>"yes" "no"</i>]\r
<p style="text-indent: 5%">\r
\r
\r
\r
-<b>ForceOpenGL</b> [value: <i>"yes", "no", "hybrid", "raster"</i>]\r
+<b>ForceOpenGL</b> [value: <i>"always", "disable", "hybrid", "raster"</i>]\r
<p style="text-indent: 5%">\r
Specifies that 2D rendering will be performed by OpenGL rather than raster 2D. This will involve polygon tesselation which may not be supported on all platforms, and 2D graphics will not loo as nice as 2D mode. The hybrid mode performs software drawing of 2D graphics with no textures (better quality) and uses OpenGL for all textures. The raster mode only uses OpenGL for pixel IO but does not perform polygin fill (no tesselation) (slow, mainly for test purposes).</p>\r
<b>DefaultNavigationMode</b> [value: <i>"Walk", "Fly", "Examine"</i>]\r
<b>AutoSwitchCount</b> [value: <i>positive integer</i>]\r
<p style="text-indent: 5%">\r
For debug purposes, instructs the player to switch representation every N segments. If 0 (default), switching is disabled.</p>\r
+<b>BufferMode</b> [value: <i>segments, minBuffer, none</i>]\r
+<p style="text-indent: 5%">\r
+Selects buffer mode:\r
+<ul> \r
+<li>segments: buffers complete segments as indicated in MPD before handing them to the player.</li>\r
+<li>minBuffer: asks the player to buffer media for the time indicated in the MPD (default mode), but segments are not pre-buffered.</li> \r
+<li>none: uses the player settings for buffering.</li> \r
+</ul>\r
+</p>\r
<b>DisableSwitching</b> [value: <i>yes, no</i>]\r
<p style="text-indent: 5%">\r
Disables automatic adaptation logic. Default is no</p>\r
Enables aborts of HTTP transfer when rate gets too low. This may result in a packet drops. Default is no.</p>\r
<b>UseServerUTC</b> [value: <i>yes, no</i>]\r
<p style="text-indent: 5%">\r
-Enables using <i>Server-UTC</i> HTTP header to compensate any drift between client and server. Default is no.\r
-\r
+Enables using <i>Server-UTC</i> HTTP header to compensate any drift between client and server. Default is yes.</p>\r
+<b>DebugAdaptationSet</b> [value: <i>integer</i>]\r
+<p style="text-indent: 5%">\r
+Plays only the adaptation set indicated by its index in the MPD. If index is negative, all sets are used (default mode).\r
+</p>\r
\r
<br/><br/>\r
\r
void libOpenHevcFlush(OpenHevc_Handle openHevcHandle);
const char *libOpenHevcVersion(OpenHevc_Handle openHevcHandle);
+void libOpenHevcSetActiveDecoders(OpenHevc_Handle openHevcHandle, int val);
+void libOpenHevcSetViewLayers(OpenHevc_Handle openHevcHandle, int val);
+
#ifdef __cplusplus
}
#endif
children [
Background2D {}
- DEF MovieControl MediaControl {}
+ DEF MovieControl MediaControl {
+ loop FALSE
+ }
DEF MovieSensor MediaSensor {
}
DEF Movie Transform2D {
current_duration = 0.0;
current_time = 0.0;
player_control = null;
+icon_pause=1;
+icon_play=0;
+max_playercontrol_width=1024;
+dynamic_scene=1;
+screen_width = 0;
+screen_height = 0;
+
+GF_STATE_PLAY=0;
+GF_STATE_PAUSE=1;
+GF_STATE_STOP=2;
+GF_STATE_TRICK=3;
all_extensions = [];
function on_movie_time(value)
{
+ var diff = current_time - value;
+ if (diff<0) diff = -diff;
/*filter out every 1/2 seconds*/
- if (current_time+0.5 > value) return;
+ if (diff < 0.5) return;
current_time = value;
player_control.set_time(value);
if (UPnP_Enabled) UPnP.MovieTime = value;
{
var w, h, r_w, r_h;
if (!width || !height) return;
-
+
+ if (width > gpac.screen_width) width = gpac.screen_width;
+ if (height > gpac.screen_height) height = gpac.screen_height;
w = width;
h = height;
r_w = r_h = 1;
root.children[0].backColor = gwskin.back_color;
movie.children[0].on_size = function(evt) {
+ if (!movie_connected) {
+ movie_connected = true;
+ gpac.set_3d(evt.type3d ? 1 : 0);
+ player_control.play.switch_icon(icon_pause);
+ dynamic_scene = evt.dynamic_scene;
+ }
if (!gpac.fullscreen) {
compute_movie_size(evt.width, evt.height);
}
}
- movie.children[0].addEventListener('gpac_scene_attached', movie.children[0].on_size, 0);
movie.children[0].on_media_progress = function(evt) {
if (!current_duration) return;
var percent_playback = 100.0 * current_time / current_duration;
//alert('URL data ' + percent_dload + ' - ' + percent_playback + ' playback');
}
- movie.children[0].addEventListener('progress', movie.children[0].on_media_progress, 0);
movie.children[0].on_media_playing = function(evt) {
- alert('URL is now paying');
+ player_control.play.switch_icon(icon_pause);
+ }
+ movie.children[0].on_media_end = function(evt) {
+ if (player_control.duration && movie_ctrl.loop) {
+ movie_ctrl.mediaStartTime = 0;
+ current_time=0;
+ }
}
+
+ movie.children[0].addEventListener('gpac_scene_attached', movie.children[0].on_size, 0);
+ movie.children[0].addEventListener('progress', movie.children[0].on_media_progress, 0);
movie.children[0].addEventListener('playing', movie.children[0].on_media_playing, 0);
movie.children[0].addEventListener('canplay', movie.children[0].on_media_playing, 0);
+ movie.children[0].addEventListener('ended', movie.children[0].on_media_end, 0);
movie.children[0].on_media_waiting = function(evt) {
alert('URL is now buffering');
if (url.indexOf('://')<0) set_movie_url('gpac://'+url);
else set_movie_url(url);
} else {
-// show_dock(true);
player_control.show();
}
}
test_resource.on_attached = function(evt) {
this.callback_done = true;
- var current_url = this.url[0];
+ current_url = this.url[0];
/*process the error or connect service*/
if (evt.error) {
gpacui_show_window(notif);
} else {
movie.children[0].url[0] = current_url;
+ movie_ctrl.mediaSpeed = 1;
+ movie_ctrl.mediaStartTime = 0;
movie_ctrl.url[0] = current_url;
movie_sensor.url[0] = current_url;
root.children[0].set_bind = FALSE;
- if (!movie_connected) {
- if (!gpac.fullscreen) {
- compute_movie_size(evt.width, evt.height);
- }
- movie_connected = true;
- gpac.set_3d(evt.type3d ? 1 : 0);
- }
+ movie.children[0].on_size(evt);
}
/*destroy the resource node*/
this.url.length = 0;
this.on_attached = null;
}
+
/*get notified when service loads or fails*/
test_resource.addEventListener('gpac_scene_attached', test_resource.on_attached, 0);
//performs layout on all contents
function layout()
{
- var i, list, start_x;
- player_control.set_size(display_width, player_control.height);
+ var i, list, start_x, w;
+ w = display_width;
+ if (max_playercontrol_width && (w>max_playercontrol_width)) w=max_playercontrol_width;
+ player_control.set_size(w, player_control.height);
dock.set_size(display_width, display_height);
wnd.snd_low = null;
wnd.snd_ctrl = null;
}
-
- if (0) {
- wnd.rewind = gw_new_icon_button(wnd.infobar, 'icons/media-seek-backward.svg', 'Rewind', 'icon');
- wnd.rewind.set_size(small_control_icon_size, small_control_icon_size);
- } else {
- wnd.rewind = null;
- }
- if (1) {
- wnd.stop = gw_new_icon_button(wnd.infobar, 'icons/media-playback-stop.svg', 'Stop', 'icon');
- wnd.stop.on_click = function() {
- player_control.stoped_url = ''+current_url;
+
+ wnd.set_state = function(state) {
+ if (!movie_connected && !controlled_renderer) return;
+
+ if (state==this.state) return;
+
+ if (state == GF_STATE_STOP) {
+ this.stoped_url = ''+current_url;
if (controlled_renderer) controlled_renderer.Stop();
else {
set_movie_url('');
/*override movie_connected to avoid auto-resizing*/
movie_connected = true;
}
-
movie_ctrl.mediaStartTime = 0;
- player_control.media_line.set_value(0);
- player_control.play.switch_icon(1);
- }
- wnd.stop.set_size(small_control_icon_size, small_control_icon_size);
- } else {
- wnd.stop = null;
- }
-
- wnd.play = gw_new_icon_button(wnd.infobar, 'icons/media-playback-start.svg', 'Play', 'icon');
- wnd.play.set_size(control_icon_size, control_icon_size);
- wnd.play.state = 0;
- wnd.play.add_icon('icons/media-playback-pause.svg');
- wnd.play.on_click = function() {
- if (!movie_connected && !controlled_renderer) return;
- if (player_control.stoped_url) {
+ this.media_line.set_value(0);
+ this.play.switch_icon(icon_play);
+ this.state = GF_STATE_STOP;
+ return;
+ }
+ if (state==GF_STATE_PAUSE) {
+ if (this.state==GF_STATE_STOP) return;
+ if (controlled_renderer) controlled_renderer.Pause();
+ movie_ctrl.mediaSpeed = 0;
+ this.state=GF_STATE_PAUSE;
+ this.play.switch_icon(icon_play);
+ return;
+ }
+ //we are playing, resume from stop if needed
+ if (this.stoped_url) {
if (controlled_renderer) {
controlled_renderer.Play();
} else {
- set_movie_url(player_control.stoped_url);
+ set_movie_url(this.stoped_url);
}
- player_control.stoped_url = null;
- this.state = 0;
- movie_ctrl.mediaStartTime = -1;
- } else if (movie_ctrl.mediaSpeed != 1) {
- this.state = 0;
- } else {
- this.state = this.state ? 0 : 1;
+ this.stoped_url = null;
+ //not in trick mode, next pause/play will restart from current time
+ if (state != GF_STATE_TRICK)
+ movie_ctrl.mediaStartTime = -1;
}
- this.switch_icon(this.state);
- if (this.state) {
- if (controlled_renderer) controlled_renderer.Pause();
- movie_ctrl.mediaSpeed = 0;
- } else {
+
+
+ if (state==GF_STATE_PLAY) {
if (controlled_renderer) controlled_renderer.Play();
+ this.state = state;
movie_ctrl.mediaSpeed = 1;
+ this.play.switch_icon(icon_pause);
+ return;
}
+ if (state==GF_STATE_TRICK) {
+ this.state = state;
+ this.play.switch_icon(icon_play);
+ movie_ctrl.mediaStartTime = -1;
+ return;
+ }
+ }
+
+ wnd.stop = gw_new_icon_button(wnd.infobar, 'icons/media-playback-stop.svg', 'Stop', 'icon');
+ wnd.stop.on_click = function() {
+ player_control.set_state(GF_STATE_STOP);
+ }
+ wnd.stop.set_size(small_control_icon_size, small_control_icon_size);
+
+
+ if (0) {
+ wnd.rewind = gw_new_icon_button(wnd.infobar, 'icons/media-seek-backward.svg', 'Rewind', 'icon');
+ wnd.rewind.set_size(small_control_icon_size, small_control_icon_size);
+ } else {
+ wnd.rewind = null;
+ }
+
+ wnd.play = gw_new_icon_button(wnd.infobar, 'icons/media-playback-start.svg', 'Play', 'icon');
+ wnd.play.set_size(control_icon_size, control_icon_size);
+ wnd.state = GF_STATE_PLAY;
+ wnd.play.add_icon('icons/media-playback-pause.svg');
+ wnd.play.on_click = function() {
+ player_control.set_state( (player_control.state==GF_STATE_PLAY) ? GF_STATE_PAUSE : GF_STATE_PLAY);
}
- wnd.play.on_long_click = function () {
- var cur_url = current_url;
- set_movie_url('');
- set_movie_url(cur_url);
+
+ if (!browser_mode) {
+ wnd.forward = gw_new_icon_button(wnd.infobar, 'icons/media-seek-forward.svg', 'Forward', 'icon');
+ wnd.forward.on_click = function() {
+ if (movie_ctrl.mediaSpeed) {
+ player_control.set_state(GF_STATE_TRICK);
+ movie_ctrl.mediaSpeed = 2*movie_ctrl.mediaSpeed;
+ }
+ }
+ wnd.forward.set_size(small_control_icon_size, small_control_icon_size);
+ } else {
+ wnd.forward = null;
}
wnd.media_line = gw_new_progress_bar(wnd.infobar, false, true);
wnd.media_line.on_slide = function(value, type) {
-
if (!movie_connected && !controlled_renderer) {
this.set_value(0);
return;
}
+
var duration = player_control.duration;
if (!duration) return;
var time = value*duration/100;
}
root.children[0].set_bind = FALSE;
switch (type) {
+ //sliding
case 1:
+ player_control.set_state(GF_STATE_PAUSE);
movie_ctrl.mediaStartTime = time;
movie_ctrl.mediaSpeed = 0;
break;
+ //done sliding
case 2:
+ player_control.set_state(GF_STATE_PLAY);
if (time!= movie_ctrl.mediaStartTime) movie_ctrl.mediaStartTime = time;
movie_ctrl.mediaSpeed = 1;
break;
+ //init slide, go in play mode
default:
+ if (player_control.state==GF_STATE_STOP)
+ player_control.set_state(GF_STATE_PLAY);
+
+ player_control.set_state(GF_STATE_PAUSE);
movie_ctrl.mediaStartTime = time;
break;
}
wnd.time.set_size(control_icon_size, control_icon_size);
wnd.time.set_width(4*wnd.time.font_size() );
- if (!browser_mode) {
- wnd.forward = gw_new_icon_button(wnd.infobar, 'icons/media-seek-forward.svg', 'Forward', 'icon');
- wnd.forward.on_click = function() {
- if (movie_ctrl.mediaSpeed) {
- movie_ctrl.mediaSpeed = 2*movie_ctrl.mediaSpeed;
- }
+ if (0) {
+ wnd.loop = gw_new_icon_button(wnd.infobar, 'vector/loop.svg', 'Loop', 'icon');
+ wnd.loop.on_click = function () {
+ movie_ctrl.loop = movie_ctrl.loop ? FALSE : TRUE;
}
- wnd.forward.set_size(small_control_icon_size, small_control_icon_size);
+ wnd.loop.set_size(small_control_icon_size, small_control_icon_size);
} else {
- wnd.forward = null;
+ wnd.loop = null;
}
- if (1) {
- wnd.view = gw_new_icon_button(wnd.infobar, 'icons/edit-find.svg', 'Navigation', 'icon');
- wnd.view.on_click = function() {
- select_navigation_type();
- }
- wnd.view.set_size(small_control_icon_size, small_control_icon_size);
- } else {
- wnd.view = null;
+
+ wnd.view = gw_new_icon_button(wnd.infobar, 'icons/edit-find.svg', 'Navigation', 'icon');
+ wnd.view.on_click = function() {
+ select_navigation_type();
}
+ wnd.view.set_size(small_control_icon_size, small_control_icon_size);
+
if (!browser_mode) {
wnd.open = gw_new_icon_button(wnd.infobar, 'icons/folder.svg', 'Open', 'icon');
var min_w, full_w, time_w;
var control_icon_size = gwskin.default_icon_size;
this.move(0, Math.floor( (height-display_height)/2) );
-
+
width -= control_icon_size/2;
min_w = this.play.width + this.time.width;
if (this.open) min_w += this.open.width;
if (this.home) min_w += this.home.width;
- if (this.exit) min_w += this.exit.width;
+ if (this.exit && gpac.fullscreen) min_w += this.exit.width;
full_w = 0;
if (this.snd_low) full_w += this.snd_low.width;
if (this.snd_ctrl) full_w += this.snd_ctrl.width;
if (this.view) {
this.view.hide();
- if (movie_connected && (gpac.navigation_type!= GF_NAVIGATE_TYPE_NONE) ) {
+ if (!dynamic_scene && movie_connected && (gpac.navigation_type!= GF_NAVIGATE_TYPE_NONE) ) {
full_w+= this.view.width;
}
}
if (this.duration) {
+ if (this.stop) full_w += this.stop.width;
+ if (this.play) full_w += this.play.width;
if (this.rewind) full_w+= this.rewind.width;
if (this.forward) full_w+= this.forward.width;
+ if (this.loop) full_w += this.loop.width;
}
- if (this.stop) full_w+= this.stop.width;
if (this.remote && UPnP.MediaRenderersCount && (current_url!='')) {
full_w += this.remote.width;
}
time_w = this.media_line.visible ? 2*control_icon_size : 0;
+
+ if (this.exit) {
+ if (gpac.fullscreen) {
+ this.exit.show();
+ } else {
+ this.exit.hide();
+ }
+ }
+
if (min_w + full_w + time_w < width) {
if (this.media_line.visible)
this.media_line.set_size(width - min_w - full_w - control_icon_size/3, control_icon_size/3);
if (this.duration) {
if (this.rewind) this.rewind.show();
if (this.forward) this.forward.show();
+ if (this.loop) this.loop.show();
+ if (this.stop) this.stop.show();
}
- if (this.stop) this.stop.show();
if (wnd.fullscreen) wnd.fullscreen.show();
if (this.remote) {
}
}
- if (this.view && movie_connected && (gpac.navigation_type!= GF_NAVIGATE_TYPE_NONE) ) {
+ if (this.view && !dynamic_scene && movie_connected && (gpac.navigation_type!= GF_NAVIGATE_TYPE_NONE) ) {
this.view.show();
}
} else {
if (this.rewind) this.rewind.hide();
if (this.stop) this.stop.hide();
if (this.forward) this.forward.hide();
+ if (this.loop) this.loop.hide();
if (this.fullscreen) this.fullscreen.hide();
if (this.remote) this.remote.hide();
wnd.duration = 0;
wnd.set_duration = function(value) {
this.duration = value;
- wnd.time.show();
- wnd.media_line.show();
- if (wnd.rewind) wnd.rewind.show();
- if (wnd.stop) wnd.stop.show();
- if (wnd.forward) wnd.forward.show();
if (!value) {
wnd.time.hide();
wnd.media_line.hide();
if (wnd.rewind) wnd.rewind.hide();
if (wnd.stop) wnd.stop.hide();
if (wnd.forward) wnd.forward.hide();
+ if (wnd.loop) wnd.loop.hide();
wnd.time.set_size(0, control_icon_size);
wnd.time.set_width(0);
- } else if (value<3600) {
- wnd.time.set_size(control_icon_size/2, control_icon_size);
- wnd.time.set_width(3*wnd.time.font_size() );
} else {
- wnd.time.set_size(control_icon_size, control_icon_size);
- wnd.time.set_width(4*wnd.time.font_size() );
+ wnd.time.show();
+ wnd.media_line.show();
+ if (wnd.rewind) wnd.rewind.show();
+ if (wnd.stop) wnd.stop.show();
+ if (wnd.forward) wnd.forward.show();
+ if (wnd.loop) wnd.loop.show();
+ if (value<3600) {
+ wnd.time.set_size(control_icon_size/2, control_icon_size);
+ wnd.time.set_width(3*wnd.time.font_size() );
+ } else {
+ wnd.time.set_size(control_icon_size, control_icon_size);
+ wnd.time.set_width(4*wnd.time.font_size() );
+ }
}
this.layout(this.width, this.height);
}
filebrowse.browse(gpac.last_working_directory);
filebrowse.on_browse = function(value, directory) {
- if (directory) gpac.last_working_directory = directory;
- set_movie_url(value);
- show_dock(false);
+ if (value==null) {
+ player_control.set_state(this.prev_state);
+ player_control.show();
+ } else {
+ if (directory) gpac.last_working_directory = directory;
+ set_movie_url(value);
+ show_dock(false);
+ }
}
+ var w = display_width/2;
+ if (w<200) w = display_width-20;
+ filebrowse.set_size(w, 3*display_height/4);
+ if (gpac.hardware_rgba) filebrowse.set_alpha(0.8);
- filebrowse.set_size(display_width, display_height);
+ player_control.hide();
gpacui_show_window(filebrowse);
- set_movie_url('');
-// filebrowse.set_alpha(0.8);
-
+ filebrowse.prev_state = player_control.state;
+ player_control.set_state(GF_STATE_PAUSE);
}
urldlg = null;
gwskin.window.width = 320;
gwskin.window.height = 240;
gwskin.window.normal = gw_new_appearance(0.6, 0.6, 0.6);
-gwskin.window.normal.texture = gw_make_gradient('vertical', [0, 0.85, 1], [0.6, 0.6, 0.6, 1, 1, 1, 0.6, 0.6, 0.6]);
+//gwskin.window.normal.texture = gw_make_gradient('vertical', [0, 0.85, 1], [0.6, 0.6, 0.6, 1, 1, 1, 0.6, 0.6, 0.6]);
gwskin.window.normal.skin = true;
gwskin.window.text = gwskin.label.text;
gwskin.window.font = gw_new_fontstyle(gwskin.window.font_size, 1);
}
start_y -= children[i].height;
}
- if (this.selected_idx < this.first_visible) this.selected_idx = this.first_visible;
- else if (this.selected_idx > this.last_visible) this.selected_idx = this.last_visible;
+// if (this.selected_idx < this.first_visible) this.selected_idx = this.first_visible;
+// else if (this.selected_idx > this.last_visible) this.selected_idx = this.last_visible;
+
+ if (this.selected_idx < this.first_visible) this.selected_idx = -1;
+ else if (this.selected_idx > this.last_visible) this.selected_idx = -1;
}
obj.add_child = function(child) {
this.children[0].children[this.children[0].children.length] = child;
return 1;
case GF_EVENT_KEYDOWN:
var children = this.get_children();
+
+ if (this.selected_idx < this.first_visible) this.selected_idx = this.first_visible;
+ else if (this.selected_idx > this.last_visible) this.selected_idx = this.last_visible;
+
if (evt.keycode=='Up') {
// alert('sel '+ this.selected_idx + ' first '+this.first_visible+ ' last '+this.last_visible);
if (children[this.selected_idx].translation.y + children[this.selected_idx].height/2 > this.height/2 ) {
}
}
+ dlg.on_close = function () {
+ if (this.on_browse) {
+ this.on_browse(null, false);
+ }
+ }
dlg.go_up = dlg.add_tool(gwskin.images.previous, gwskin.labels.up);
dlg.go_up.on_click = function() {
GF_Err gf_avc_get_pps_info(char *pps, u32 pps_size, u32 *pps_id, u32 *sps_id);
const char *gf_avc_get_profile_name(u8 video_prof);
+//hevc_state is optional but shall be used for layer extensions since all size info is in VPS and not SPS
GF_Err gf_hevc_get_sps_info(char *sps_data, u32 sps_size, u32 *sps_id, u32 *width, u32 *height, s32 *par_n, s32 *par_d);
const char *gf_hevc_get_profile_name(u8 video_prof);
#endif /*GPAC_DISABLE_AV_PARSERS*/
u8 high;
} GF_ColorKey;
-/*!\brief not done yet
+/*!\brief stretches two video surfaces
*
+ * Software stretch of source surface onto destination surface.
+ *\param dst destination surface
+ *\param src source surface
+ *\param dst_wnd destination rectangle. If null the entire destination surface is used
+ *\param src_wnd source rectangle. If null the entire source surface is used
+ *\param alpha blend factor of the source over the destination
+ *\param flip flips the source
+ *\param colorKey makes source pixel matching the color key transparent
+ *\param cmat applies color matrix to the source
+ *\return error code if any
*/
GF_Err gf_stretch_bits(GF_VideoSurface *dst, GF_VideoSurface *src, GF_Window *dst_wnd, GF_Window *src_wnd, u8 alpha, Bool flip, GF_ColorKey *colorKey, GF_ColorMatrix * cmat);
+/*!\brief copies YUV 420 10 bits to YUV destination (only YUV420 8 bits supported)
+ *
+ * Software stretch of source surface onto destination surface.
+ *\param vs_dst destination surface
+ *\param pY source Y plane
+ *\param pU source U plane. if NULL, the U plane is located after the Y plane
+ *\param pV source V plane. if NULL, the V plane is located after the U plane
+ *\param src_stride source stride in bytes
+ *\param src_width source width in pixels
+ *\param src_height source height in pixels
+ *\param src_wnd source rectangle. If null the entire source surface is used
+ *\return error code if any
+ */
+GF_Err gf_color_write_yv12_10_to_yuv(GF_VideoSurface *vs_dst, unsigned char *pY, unsigned char *pU, unsigned char*pV, u32 src_stride, u32 src_width, u32 src_height, const GF_Window *src_wnd);
+
/*! @} */
GF_Err gf_sc_set_scene(GF_Compositor *sr, GF_SceneGraph *scene_graph);
/*if the compositor doesn't use its own thread for visual, this will perform a render pass*/
-Bool gf_sc_draw_frame(GF_Compositor *sr);
+Bool gf_sc_draw_frame(GF_Compositor *sr, u32 *ms_till_next);
/*inits rendering info for the node - shall be called for all nodes the parent system doesn't handle*/
void gf_sc_on_node_init(GF_Compositor *sr, GF_Node *node);
/*return the compositor time - this is the time every time line syncs on*/
u32 gf_sc_get_clock(GF_Compositor *sr);
+//signals the node is about to be destroyed (called after the node destructor if any). If node is NULL, SG will be set to indicate the entire scene graph is about to be reset
+void gf_sc_node_destroy(GF_Compositor *compositor, GF_Node *node, GF_SceneGraph *sg);
/*locks/unlocks the visual scene rendering - modification of the scene tree shall only happen when scene compositor is locked*/
void gf_sc_lock(GF_Compositor *sr, Bool doLock);
/*this file defines all common macros for libgpac compilation
except for symbian32 which uses .mmp directives ... */
-#if defined(WIN32) || defined(_WIN32_WCE) || defined(GPAC_CONFIG_DARWIN) /*visual studio and xcode*/
-/*enables GPAC fixed point*/
-//#define GPAC_FIXED_POINT
+
+/*visual studio and xcode*/
+#if defined(WIN32) || defined(_WIN32_WCE) || defined(GPAC_CONFIG_DARWIN)
/*enables GPAC memory tracking in debug mode only*/
#if defined(DEBUG) || defined(_DEBUG)
#define GPAC_MEMORY_TRACKING
#endif
-/*platform is big endian*/
-//#define GPAC_BIG_ENDIAN
-
-/*SSL enabled*/
+/*SSL enabled - no 64 bit support yet*/
#if defined(WIN32) && !defined(_WIN64)
#define GPAC_HAS_SSL
#endif
#define GPAC_HAS_SPIDERMONKEY
#ifdef GPAC_CONFIG_DARWIN
#define MOZILLA_1_8_BRANCH
+#define XP_UNIX
#endif
-/*zlib enabled*/
-//#define GPAC_DISABLE_ZLIB
-
/*libjpeg enabled*/
#define GPAC_HAS_JPEG
-
/*png enabled*/
#define GPAC_HAS_PNG
/*IPv6 enabled - for win32, this is evaluated at compile time, !! do not uncomment !!*/
-//#define GPAC_HAS_IPV6
-/*3D compositor disabled*/
-#ifdef GPAC_CONFIG_DARWIN
-//#define GPAC_DISABLE_3D
-#endif
-/*use TinyGL instead of OpenGL*/
-//#define GPAC_USE_TINYGL
+//iOS compilation
+#if defined(GPAC_CONFIG_DARWIN) && defined(GPAC_IPHONE)
-/*use OpenGL ES instead of OpenGL*/
-#ifdef GPAC_CONFIG_DARWIN
#define GPAC_USE_OGL_ES
#define GPAC_FIXED_POINT
-#ifdef GPAC_IPHONE
#define GPAC_HAS_GLU
-#endif
-#endif
-
/*lazy definition of extra libs for iOS*/
-#if defined(GPAC_IPHONE)
#define GPAC_HAS_FAAD
//#define GPAC_HAS_MAD
#define GPAC_HAS_SDL
#define GPAC_HAS_FREETYPE
+
+#endif //end iOS flags
+
+
+//OSX compilation
+#if defined(GPAC_CONFIG_DARWIN) && !defined(GPAC_IPHONE)
+
+#define GPAC_HAS_IPV6
+#define GPAC_HAS_SSL
+
+#ifdef __LP64__
+#define GPAC_64_BITS
#endif
+#endif //end OSX flags
+
+
+//WinCE flags
#if defined(_WIN32_WCE)
#ifndef GPAC_FIXED_POINT
#define GPAC_USE_OGL_ES
#endif
-#endif /*_WIN32_WCE*/
+#endif //WinCE flags
-#endif /*defined(WIN32) || defined(_WIN32_WCE)*/
+#endif /*defined(WIN32) || defined(_WIN32_WCE) || defined(GPAC_CONFIG_DARWIN)*/
#if defined(__SYMBIAN32__)
/*disables dashclient */
//#define GPAC_DISABLE_DASH_CLIENT
+/*disables Timed Text support */
+//#define GPAC_DISABLE_TTXT
+
#endif /*_GF_CONFIG_H_*/
GPAC_OTI_VIDEO_HEVC = 0x23,
/*!OTI for H264-SVC streams*/
GPAC_OTI_VIDEO_SVC = 0x24,
- /*!OTI for H264-SVC streams*/
+ /*!OTI for HEVC layered streams*/
GPAC_OTI_VIDEO_SHVC = 0x25,
/*!OTI for MPEG-4 AAC streams*/
GPAC_OTI_AUDIO_AAC_MPEG4 = 0x40,
/*indicates whether a group is selected for playback or not. Currently groups cannot be selected during playback*/
Bool gf_dash_is_group_selected(GF_DashClient *dash, u32 group_index);
+/*indicates whether a group can be selected for playback or not. Some groups may have been disabled because of non supported features*/
+Bool gf_dash_is_group_selectable(GF_DashClient *dash, u32 idx);
+
/*selects a group for playback. If other groups are alternate to this group (through the @group attribute), they are automatically deselected. */
void gf_dash_group_select(GF_DashClient *dash, u32 idx, Bool select);
//shifts UTC clock of server by shift_utc_ms so that new UTC in MPD is old + shift_utc_ms
void gf_dash_set_utc_shift(GF_DashClient *dash, s32 shift_utc_ms);
-//sets max resolution for all video
-GF_Err gf_dash_set_max_resolution(GF_DashClient *dash, u32 width, u32 height);
+//sets max resolution@bpp for all video
+GF_Err gf_dash_set_max_resolution(GF_DashClient *dash, u32 width, u32 height, u8 max_display_bpp);
//sets min time in ms between a 404 and the next request on the same group. The default value is 500 ms.
GF_Err gf_dash_set_min_timeout_between_404(GF_DashClient *dash, u32 min_timeout_between_404);
//sets time in ms after which 404 request for a segment will indicate segment lost. The default value is 100 ms.
GF_Err gf_dash_set_segment_expiration_threshold(GF_DashClient *dash, u32 expire_after_ms);
+
+//only enables the given group - this shall be set before calling @gf_dash_open. If group_index is <0 (default) no groups will be disabled.
+void gf_dash_debug_group(GF_DashClient *dash, s32 group_index);
+
+//indicates typical buffering used by the user app. This allows fetching data earlier in live mode, if the timeshift buffer allows for it
+void gf_dash_set_user_buffer(GF_DashClient *dash, u32 buffer_time_ms);
+
#endif //GPAC_DISABLE_DASH_CLIENT
Bool is_connected;
} GF_EventConnect;
+/*event proc return value: 1 to indicate the terminal should attempt a default layout for this addon, 0: nothing will be done*/
+typedef struct
+{
+ /*GF_EVENT_ADDON_DETECTED*/
+ u8 type;
+ const char *addon_url;
+ const char *mime_type;
+} GF_EventAddonConnect;
+
/*event proc return value: 1 if info has been completed, 0 otherwise (and operation this request was for
will then fail)*/
typedef struct
GF_EventMutation mutation;
GF_EventForwarded forwarded_event;
GF_EventOpenFile open_file;
+ GF_EventAddonConnect addon_connect;
} GF_Event;
/* Events for Keyboad */
GF_EVENT_TEXT_EDITING_START,
GF_EVENT_TEXT_EDITING_END,
+
+ GF_EVENT_ADDON_DETECTED,
} GF_EventType;
/*GPAC/DOM3 key codes*/
JSObject *_this;
GF_List *times;
+ u32 timescale;
} GF_HTML_MediaTimeRanges;
typedef enum {
u32 packet_index; /* index of MSE Packets*/\
GF_Mutex *buffer_mutex;\
Bool last_dts_set; \
- double last_dts; /* MSE last decode timestamp */ \
+ u64 last_dts; /* MSE last decode timestamp (in timescale units)*/ \
+ u32 last_dur; /* MSE last frame duration (in timescale units)*/ \
Bool highest_pts_set; \
- double highest_pts; /* MSE highest presentation timestamp */ \
+ u64 highest_pts; /* MSE highest presentation timestamp (in timescale units)*/ \
Bool needs_rap; /* MSE need random access point flag */ \
u32 timescale; /* used by time stamps in MSE Packets */ \
+ s64 timestampOffset; /* MSE SourceBuffer value (in timescale units) */ \
/* standard HTML properties */ \
GF_HTML_TrackType type;\
char *id;\
/* JavaScript counterpart */
JSObject *_this;
- GF_HTML_MediaTimeRanges buffered;
- GF_HTML_MediaTimeRanges seekable;
- GF_HTML_MediaTimeRanges played;
+ GF_HTML_MediaTimeRanges *buffered;
+ GF_HTML_MediaTimeRanges *seekable;
+ GF_HTML_MediaTimeRanges *played;
Bool paused;
GF_HTML_MediaControllerPlaybackState playbackState;
double defaultPlaybackRate;
/* crossOrigin: "must reflect the content of the attribute of the same name", use the node */
/* networkState: retrieved dynamically from GPAC Service */
/* preload: "must reflect the content of the attribute of the same name", use the node */
- GF_HTML_MediaTimeRanges buffered;
+ GF_HTML_MediaTimeRanges *buffered;
/* ready state */
/* readyState: retrieved from GPAC Media Object dynamically */
Bool seeking;
char *startDate;
Bool paused;
double defaultPlaybackRate;
- GF_HTML_MediaTimeRanges played;
- GF_HTML_MediaTimeRanges seekable;
+ GF_HTML_MediaTimeRanges *played;
+ GF_HTML_MediaTimeRanges *seekable;
/* ended: retrieved from the state of GPAC Media Object */
/* autoplay: "must reflect the content of the attribute of the same name", use the node */
/* loop: "must reflect the content of the attribute of the same name", use the node */
/*
* TimeRanges
*/
-GF_Err gf_media_time_ranges_add(GF_HTML_MediaTimeRanges *timeranges, double start, double end);
+GF_HTML_MediaTimeRanges *gf_html_timeranges_new(u32 timescale);
+GF_Err gf_html_timeranges_add_start(GF_HTML_MediaTimeRanges *timeranges, u64 start);
+GF_Err gf_html_timeranges_add_end(GF_HTML_MediaTimeRanges *timeranges, u64 end);
void gf_html_timeranges_reset(GF_HTML_MediaTimeRanges *range);
void gf_html_timeranges_del(GF_HTML_MediaTimeRanges *range);
+GF_HTML_MediaTimeRanges *gf_html_timeranges_intersection(GF_HTML_MediaTimeRanges *a, GF_HTML_MediaTimeRanges *b);
+GF_HTML_MediaTimeRanges *gf_html_timeranges_union(GF_HTML_MediaTimeRanges *a, GF_HTML_MediaTimeRanges *b);
/*
* HTML5 TrackList
*/
GF_HTML_MediaElement *gf_html_media_element_new(GF_Node *media_node, GF_HTML_MediaController *mc);
void gf_html_media_element_del(GF_HTML_MediaElement *me);
-GF_DOMEventTarget *gf_html_media_get_event_target_from_node(GF_Node *n);
void html_media_element_js_init(JSContext *c, JSObject *new_obj, GF_Node *n);
/* MSE defined properties */
Bool updating;
- GF_HTML_MediaTimeRanges buffered;
- double timestampOffset;
+ GF_HTML_MediaTimeRanges *buffered;
+ s64 timestampOffset;
double appendWindowStart;
double appendWindowEnd;
u32 timescale;
GF_HTML_MediaSource_AppendState append_state;
Bool buffer_full_flag;
+ /* Mode used to append media data:
+ - "segments" uses the timestamps in the media,
+ - "sequence" ignores them and appends just after the previous data */
GF_HTML_MediaSource_AppendMode append_mode;
- double group_start_timestamp;
+
+ /* time (in timescale units) of the first frame in the group */
+ u64 group_start_timestamp;
Bool group_start_timestamp_flag;
- double highest_end_timestamp;
- Bool highest_end_timestamp_set;
+ /* time (in timescale units) of the frame end time (start + duration) in the group */
+ u64 group_end_timestamp;
+ Bool group_end_timestamp_set;
+
Bool first_init_segment;
- double remove_start;
- double remove_end;
+ /* times (in timescale units) of the frames to be removed */
+ u64 remove_start;
+ u64 remove_end;
/*
* GPAC internal objects
*/
- /* Media tracks associated to this source buffer */
+ /* Media tracks (GF_HTML_Track) associated to this source buffer */
GF_List *tracks;
/* Buffers to parse */
GF_List *input_buffer;
Bool parser_connected;
/* Threads used to asynchronously parse the buffer and remove media data */
+ GF_List *threads;
GF_Thread *parser_thread;
GF_Thread *remove_thread;
void gf_mse_mediasource_open(GF_HTML_MediaSource *ms, struct _mediaobj *mo);
void gf_mse_mediasource_close(GF_HTML_MediaSource *ms);
void gf_mse_mediasource_end(GF_HTML_MediaSource *ms);
+void gf_mse_mediasource_add_source_buffer(GF_HTML_MediaSource *ms, GF_HTML_SourceBuffer *sb);
GF_HTML_SourceBuffer *gf_mse_source_buffer_new(GF_HTML_MediaSource *mediasource);
+void gf_mse_source_buffer_set_timestampOffset(GF_HTML_SourceBuffer *sb, double d);
+void gf_mse_source_buffer_set_timescale(GF_HTML_SourceBuffer *sb, u32 timescale);
GF_Err gf_mse_source_buffer_load_parser(GF_HTML_SourceBuffer *sourcebuffer, const char *mime);
-void gf_mse_add_source_buffer(GF_HTML_MediaSource *ms, GF_HTML_SourceBuffer *sb);
+GF_Err gf_mse_remove_source_buffer(GF_HTML_MediaSource *ms, GF_HTML_SourceBuffer *sb);
void gf_mse_source_buffer_del(GF_HTML_SourceBuffer *sb);
GF_Err gf_mse_source_buffer_abort(GF_HTML_SourceBuffer *sb);
void gf_mse_source_buffer_append_arraybuffer(GF_HTML_SourceBuffer *sb, GF_HTML_ArrayBuffer *buffer);
void gf_mse_source_buffer_update_buffered(GF_HTML_SourceBuffer *sb);
-u32 gf_mse_source_buffer_remove(void *par);
-
+void gf_mse_remove(GF_HTML_SourceBuffer *sb, double start, double end);
+
typedef struct
{
char *data;
GF_RTP_PAYT_H264_SVC,
/*use HEVC/H265 transport - no RFC yet, only draft*/
GF_RTP_PAYT_HEVC,
+ GF_RTP_PAYT_SHVC
};
Bool npot_texture;
Bool rect_texture;
Bool point_sprite;
- Bool vbo;
+ Bool vbo, pbo;
u32 yuv_texture;
Bool has_shaders;
+ s32 max_texture_size;
} GLCaps;
#endif
/*0: not init, 1: running, 2: exit requested, 3: done*/
u32 video_th_state;
+ u32 video_th_id;
+
/*compositor exclusive access to the scene and display*/
GF_Mutex *mx;
/*all textures (texture handlers)*/
GF_List *textures;
-#ifdef GF_SR_EVENT_QUEUE
+ /*all textures to be destroyed (needed for openGL context ...)*/
+ GF_List *textures_gc;
+
/*event queue*/
- GF_List *events;
- GF_Mutex *ev_mx;
-#endif
+ GF_List *event_queue, *event_queue_back;
+ GF_Mutex *evq_mx;
Bool video_setup_failed;
Bool show_caret;
Bool text_edit_changed;
u32 scene_sampled_clock;
-
u32 last_click_time;
u32 next_frame_delay;
s32 frame_delay;
+ Bool video_frame_pending;
+ Bool fullscreen_postponed;
/*display size*/
u32 display_width, display_height;
/*options*/
u32 aspect_ratio, antiAlias, texture_text_mode;
Bool high_speed, stress_mode;
- Bool was_opengl;
+ Bool is_opengl;
Bool autoconfig_opengl;
u32 force_opengl_2d;
#ifdef OPENGL_RASTER
Bool disable_gl_cull;
/*YUV textures in OpenGL are disabled (soft YUV->RGB )*/
Bool disable_yuvgl;
+ //use PBO to start pushing textures at the beginning of the render pass
+ Bool enable_pbo;
u32 default_navigation_mode;
#endif
};
+typedef struct
+{
+ GF_Event evt;
+ GF_DOM_Event dom_evt;
+ GF_Node *node;
+ GF_DOMEventTarget *target;
+ GF_SceneGraph *sg;
+} GF_QueuedEvent;
+
+void gf_sc_queue_dom_event(GF_Compositor *compositor, GF_Node *node, GF_DOM_Event *evt);
+void gf_sc_queue_dom_event_on_target(GF_Compositor *compositor, GF_DOM_Event *evt, GF_DOMEventTarget *target, GF_SceneGraph *sg);
/*base stack for timed nodes (nodes that activate themselves at given times)
@UpdateTimeNode: shall be setup by the node handler and is called once per simulation frame
/*returns 1 if url changed from current one*/
Bool gf_sc_texture_check_url_change(GF_TextureHandler *txh, MFURL *url);
+/* opens associated object */
+GF_Err gf_sc_texture_open(GF_TextureHandler *txh, MFURL *url, Bool lock_scene_timeline);
/*starts associated object*/
GF_Err gf_sc_texture_play(GF_TextureHandler *txh, MFURL *url);
GF_Err gf_sc_texture_play_from_to(GF_TextureHandler *txh, MFURL *url, Double start_offset, Double end_offset, Bool can_loop, Bool lock_scene_timeline);
/*release video memory if needed*/
void gf_sc_texture_release_stream(GF_TextureHandler *txh);
+void gf_sc_texture_cleanup_hw(GF_Compositor *compositor);
/*sensor node handler - this is not defined as a stack because Anchor is both a grouping node and a
Bool gf_sc_use_raw_texture(GF_Compositor *compositor);
-void gf_sc_get_av_caps(GF_Compositor *compositor, u32 *width, u32 *height, u32 *bpp, u32 *channels, u32 *sample_rate);
+void gf_sc_get_av_caps(GF_Compositor *compositor, u32 *width, u32 *height, u32 *display_bit_depth, u32 *audio_bpp, u32 *channels, u32 *sample_rate);
//signals the compositor a system frame is pending on a future frame
void gf_sc_set_system_pending_frame(GF_Compositor *compositor, Bool frame_pending);
+//indicates a video frame is pending - this is used for decoders dispatching their internal memory in order to wake up the compositor asap
+void gf_sc_set_video_pending_frame(GF_Compositor *compositor);
+
+Bool gf_sc_is_over(GF_Compositor *compositor, GF_SceneGraph *scene_graph);
+
#ifdef __cplusplus
}
#endif
GF_ISOM_BOX_TYPE_PSSH = GF_4CC( 'p', 's', 's', 'h' ),
GF_ISOM_BOX_TYPE_TENC = GF_4CC( 't', 'e', 'n', 'c' ),
+ /*Adobe's protection boxes*/
+ GF_ISOM_BOX_TYPE_ADKM = GF_4CC( 'a', 'd', 'k', 'm' ),
+ GF_ISOM_BOX_TYPE_AHDR = GF_4CC( 'a', 'h', 'd', 'r' ),
+ GF_ISOM_BOX_TYPE_ADAF = GF_4CC( 'a', 'd', 'a', 'f' ),
+ GF_ISOM_BOX_TYPE_APRM = GF_4CC( 'a', 'p', 'r', 'm' ),
+ GF_ISOM_BOX_TYPE_AEIB = GF_4CC( 'a', 'e', 'i', 'b' ),
+ GF_ISOM_BOX_TYPE_AKEY = GF_4CC( 'a', 'k', 'e', 'y' ),
+ GF_ISOM_BOX_TYPE_FLXS = GF_4CC( 'f', 'l', 'x', 's' ),
+
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
/*Movie Fragments*/
GF_ISOM_BOX_TYPE_MVEX = GF_4CC( 'm', 'v', 'e', 'x' ),
struct __oma_kms_box *okms;
struct __cenc_tenc_box *tenc;
struct __piff_tenc_box *piff_tenc;
+ struct __adobe_drm_key_management_system_box *adkm;
} GF_SchemeInformationBox;
typedef struct __tag_protect_box
GF_Err senc_Parse(GF_BitStream *bs, GF_TrackBox *trak, GF_TrackFragmentBox *traf, GF_SampleEncryptionBox *ptr);
+/*
+ Boxes for Adobe's protection scheme
+*/
+typedef struct __adobe_enc_info_box
+{
+ GF_ISOM_FULL_BOX
+ char *enc_algo; /*spec: The encryption algorithm shall be 'AES-CBC'*/
+ u8 key_length;
+} GF_AdobeEncryptionInfoBox;
+
+typedef struct __adobe_flash_access_params_box
+{
+ GF_ISOM_BOX
+ char *metadata; /*base-64 encoded metadata used by the DRM client to retrieve decrypted key*/
+} GF_AdobeFlashAccessParamsBox;
+
+typedef struct __adobe_key_info_box
+{
+ GF_ISOM_FULL_BOX
+ GF_AdobeFlashAccessParamsBox * params; /*spec: APSParamsBox will no longer be produced by conforming applications*/
+} GF_AdobeKeyInfoBox;
+
+typedef struct __adobe_std_enc_params_box
+{
+ GF_ISOM_FULL_BOX
+ GF_AdobeEncryptionInfoBox *enc_info;
+ GF_AdobeKeyInfoBox *key_info;
+} GF_AdobeStdEncryptionParamsBox;
+
+typedef struct __adobe_drm_header_box
+{
+ GF_ISOM_FULL_BOX
+ GF_AdobeStdEncryptionParamsBox *std_enc_params;
+ //AdobeSignatureBox *signature; /*AdobeSignatureBox is not described*/
+} GF_AdobeDRMHeaderBox;
+
+
+typedef struct __adobe_drm_au_format_box
+{
+ GF_ISOM_FULL_BOX
+ u8 selective_enc;
+ u8 IV_length;
+} GF_AdobeDRMAUFormatBox;
+
+typedef struct __adobe_drm_key_management_system_box
+{
+ GF_ISOM_FULL_BOX
+ GF_AdobeDRMHeaderBox *header;
+ GF_AdobeDRMAUFormatBox *au_format;
+} GF_AdobeDRMKeyManagementSystemBox;
+
+
typedef struct
{
GF_ISOM_FULL_BOX
GF_Err stbl_RemoveSampleFragments(GF_SampleTableBox *stbl, u32 sampleNumber);
GF_Err stbl_RemoveRedundant(GF_SampleTableBox *stbl, u32 SampleNumber);
-/*expands sampleGroup table for the given grouping type and sample_number. If sample_number is 0, just appends an entry at the end of the table*/
-GF_Err gf_isom_add_sample_group_entry(GF_List *sampleGroups, u32 sample_number, u32 grouping_type, u32 sampleGroupDescriptionIndex);
+GF_Err gf_isom_copy_sample_group_entry_to_traf(GF_TrackFragmentBox *traf, GF_SampleTableBox *stbl, u32 grouping_type, u32 sampleGroupDescriptionIndex, Bool sgpd_in_traf);
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
GF_Err gf_isom_close_fragments(GF_ISOFile *movie);
/*rewrites avcC based on the given esd - this destroys the esd*/
GF_Err AVC_HEVC_UpdateESD(GF_MPEGVisualSampleEntryBox *avc, GF_ESD *esd);
+void AVC_RewriteESDescriptorEx(GF_MPEGVisualSampleEntryBox *avc, GF_MediaBox *mdia);
void AVC_RewriteESDescriptor(GF_MPEGVisualSampleEntryBox *avc);
+void HEVC_RewriteESDescriptorEx(GF_MPEGVisualSampleEntryBox *avc, GF_MediaBox *mdia);
void HEVC_RewriteESDescriptor(GF_MPEGVisualSampleEntryBox *avc);
GF_Err reftype_AddRefTrack(GF_TrackReferenceTypeBox *ref, u32 trackID, u16 *outRefIndex);
GF_Err prft_Read(GF_Box *s,GF_BitStream *bs);
GF_Err prft_dump(GF_Box *a, FILE * trace);
+//exported for sgpd comparison in traf merge
+void sgpd_write_entry(u32 grouping_type, void *entry, GF_BitStream *bs);
+
+/*
+ Adobe's protection boxes
+*/
+
+GF_Box *adkm_New();
+void adkm_del(GF_Box *s);
+GF_Err adkm_AddBox(GF_Box *s, GF_Box *a);
+GF_Err adkm_Read(GF_Box *s, GF_BitStream *bs);
+GF_Err adkm_Write(GF_Box *s, GF_BitStream *bs);
+GF_Err adkm_Size(GF_Box *s);
+GF_Err adkm_dump(GF_Box *a, FILE * trace);
+
+GF_Box *ahdr_New();
+void ahdr_del(GF_Box *s);
+GF_Err ahdr_AddBox(GF_Box *s, GF_Box *a);
+GF_Err ahdr_Read(GF_Box *s, GF_BitStream *bs);
+GF_Err ahdr_Write(GF_Box *s, GF_BitStream *bs);
+GF_Err ahdr_Size(GF_Box *s);
+GF_Err ahdr_dump(GF_Box *a, FILE * trace);
+
+GF_Box *aprm_New();
+void aprm_del(GF_Box *s);
+GF_Err aprm_AddBox(GF_Box *s, GF_Box *a);
+GF_Err aprm_Read(GF_Box *s, GF_BitStream *bs);
+GF_Err aprm_Write(GF_Box *s, GF_BitStream *bs);
+GF_Err aprm_Size(GF_Box *s);
+GF_Err aprm_dump(GF_Box *a, FILE * trace);
+
+GF_Box *aeib_New();
+void aeib_del(GF_Box *s);
+GF_Err aeib_Read(GF_Box *s, GF_BitStream *bs);
+GF_Err aeib_Write(GF_Box *s, GF_BitStream *bs);
+GF_Err aeib_Size(GF_Box *s);
+GF_Err aeib_dump(GF_Box *a, FILE * trace);
+
+GF_Box *akey_New();
+void akey_del(GF_Box *s);
+GF_Err akey_AddBox(GF_Box *s, GF_Box *a);
+GF_Err akey_Read(GF_Box *s, GF_BitStream *bs);
+GF_Err akey_Write(GF_Box *s, GF_BitStream *bs);
+GF_Err akey_Size(GF_Box *s);
+GF_Err akey_dump(GF_Box *a, FILE * trace);
+
+GF_Box *flxs_New();
+void flxs_del(GF_Box *s);
+GF_Err flxs_Read(GF_Box *s, GF_BitStream *bs);
+GF_Err flxs_Write(GF_Box *s, GF_BitStream *bs);
+GF_Err flxs_Size(GF_Box *s);
+GF_Err flxs_dump(GF_Box *a, FILE * trace);
+
+
+GF_Box *adaf_New();
+void adaf_del(GF_Box *s);
+GF_Err adaf_Read(GF_Box *s, GF_BitStream *bs);
+GF_Err adaf_Write(GF_Box *s, GF_BitStream *bs);
+GF_Err adaf_Size(GF_Box *s);
+GF_Err adaf_dump(GF_Box *a, FILE * trace);
+
#endif /*GPAC_DISABLE_ISOM*/
#ifdef __cplusplus
u32 num_units_in_tick, time_scale;
Bool poc_proportional_to_timing_flag;
u32 num_ticks_poc_diff_one_minus1;
+
+ u32 rep_format_idx;
} HEVC_SPS;
typedef struct
u32 column_width[22], row_height[20];
} HEVC_PPS;
+typedef struct RepFormat
+{
+ u32 chroma_format_idc;
+ u32 pic_width_luma_samples;
+ u32 pic_height_luma_samples;
+ u32 bit_depth_luma;
+ u32 bit_depth_chroma;
+ u8 separate_colour_plane_flag;
+} HEVC_RepFormat;
+
typedef struct
{
u16 avg_bit_rate, max_bit_rate, avg_pic_rate;
u8 constand_pic_rate_idc;
} HEVC_RateInfo;
+
+#define MAX_SHVC_LAYERS 4
typedef struct
{
s32 id;
/*used to discard repeated SPSs - 0: not parsed, 1 parsed, 2 stored*/
u32 state;
u32 crc;
- u8 max_sub_layer;
+ u32 max_layers, max_sub_layers, max_layer_id, num_layer_sets;
Bool temporal_id_nesting;
HEVC_ProfileTierLevel ptl;
HEVC_SublayerPTL sub_ptl[8];
HEVC_RateInfo rates[8];
+
+ u32 scalability_mask[16];
+ u32 dimension_id[MAX_SHVC_LAYERS][16];
+ u32 layer_id_in_nuh[MAX_SHVC_LAYERS];
+ u32 layer_id_in_vps[MAX_SHVC_LAYERS];
+
+
+ u32 profile_level_tier_idx[MAX_SHVC_LAYERS];
+ HEVC_ProfileTierLevel ext_ptl[MAX_SHVC_LAYERS];
+
+ u32 num_rep_formats;
+ HEVC_RepFormat rep_formats[16];
+ u32 rep_format_idx[16];
} HEVC_VPS;
typedef struct
HEVC_PPS *pps;
} HEVCSliceInfo;
-typedef struct
+typedef struct _hevc_state
{
HEVC_SPS sps[16]; /* range allowed in the spec is 0..15 */
s8 sps_active_idx; /*currently active sps; must be initalized to -1 in order to discard not yet decodable SEIs*/
Bool gf_media_hevc_slice_is_intra(HEVCState *hevc);
Bool gf_media_hevc_slice_is_IDR(HEVCState *hevc);
+GF_Err gf_hevc_get_sps_info_with_state(HEVCState *hevc_state, char *sps_data, u32 sps_size, u32 *sps_id, u32 *width, u32 *height, s32 *par_n, s32 *par_d);
#endif /*GPAC_DISABLE_AV_PARSERS*/
GF_MPD_Type type;
u64 availabilityStartTime; /* expressed in milliseconds */ /*MANDATORY if type=dynamic*/
u64 availabilityEndTime;/* expressed in milliseconds */
+ u64 publishTime;/* expressed in milliseconds */
u32 media_presentation_duration; /* expressed in milliseconds */ /*MANDATORY if type=static*/
u32 minimum_update_period; /* expressed in milliseconds */
u32 min_buffer_time; /* expressed in milliseconds */ /*MANDATORY*/
extern int synBytes[MAXDEG];
/* print debugging info */
-extern int DEBUG;
+extern int RS_DEBUG;
/* Reed Solomon encode/decode routines */
void initialize_ecc (void);
/*real scene time callback*/
Double (*GetSceneTime)(void *userpriv);
-
-
/*parent scene if any*/
struct __tag_scene_graph *parent_scene;
GF_Err gf_dom_event_remove_listener_from_parent(GF_DOMEventTarget *event_target, GF_Node *listener);
/* returns associated DOMEventtarget for an HTML/SVG media element, or NULL otherwise*/
-GF_DOMEventTarget *gf_html_media_get_event_target_from_node(GF_Node *n);
+GF_DOMEventTarget *gf_dom_event_get_target_from_node(GF_Node *n);
#ifdef __cplusplus
}
#endif
#endif
+#if defined(DEBUG) && defined(GPAC_CONFIG_DARWIN)
+#undef DEBUG
+#endif
+
#include <jsapi.h>
#ifndef JS_VERSION
typedef struct _es_channel GF_Channel;
typedef struct _generic_codec GF_Codec;
typedef struct _composition_memory GF_CompositionMemory;
+typedef struct _gf_addon_media GF_AddonMedia;
struct _net_service
struct _tag_terminal *term;
/*service url*/
char *url;
+ /*service mime type*/
+ char *mime;
/*od_manager owning service, NULL for services created for remote channels*/
struct _od_manager *owner;
/*number of attached remote channels ODM (ESD URLs)*/
/*URLs of current video, audio and subs (we can't store objects since they may be destroyed when seeking)*/
SFURL visual_url, audio_url, text_url, dims_url;
+ Bool end_of_scene;
#ifndef GPAC_DISABLE_VRML
/*list of externproto libraries*/
GF_List *extern_protos;
/*list of M_KeyNavigator nodes*/
GF_List *keynavigators;
#endif
+
+
+ GF_AddonMedia *active_addon;
+ GF_List *declared_addons;
};
GF_Scene *gf_scene_new(GF_Scene *parentScene);
Bool gf_scene_check_clocks(GF_ClientService *ns, GF_Scene *scene);
-void gf_scene_notify_event(GF_Scene *scene, u32 event_type, GF_Node *n, void *dom_evt, GF_Err code);
+void gf_scene_notify_event(GF_Scene *scene, u32 event_type, GF_Node *n, void *dom_evt, GF_Err code, Bool no_queueing);
void gf_scene_mpeg4_inline_restart(GF_Scene *scene);
/*signal that next AU is an AU start*/
Bool NextIsAUStart;
/*if codec resilient, packet drops are not considered as fatal for AU reconstruction (eg no wait for RAP)*/
- Bool codec_resilient;
+ u32 codec_resilient;
/*when starting a channel, the first AU is ALWAYS fetched when buffering - this forces
BIFS and OD to be decoded and first frame render, in order to detect media objects that would also need
buffering - note this doesn't affect the clock, it is still paused if buffering*/
Bool last_au_was_seek;
Bool no_timestamps;
- u32 prev_aggregated_dts;
Bool pull_forced_buffer;
u8 last_unit_signature[20];
/*in case the codec performs temporal re-ordering itself*/
Bool is_reordering;
+ /*codec will properly handle CTS adjustments*/
+ Bool trusted_cts;
u32 prev_au_size;
u32 bytes_per_sec;
Double fps;
/*statistics*/
u32 last_stat_start, cur_bit_size, tot_bit_size, stat_start;
u32 avg_bit_rate, max_bit_rate;
- u32 total_dec_time, nb_dec_frames, max_dec_time;
+ u32 nb_dec_frames;
+ u64 total_dec_time, max_dec_time;
u32 first_frame_time, last_frame_time;
/*number of frames dropped at the presentation*/
u32 nb_droped;
/*the media sensor(s) attached to this object*/
GF_List *ms_stack;
#endif
+
+ //only set on root OD of addon subscene, which gather all the hybrid resources
+ GF_AddonMedia *addon;
+ //set to true if this is a scalable addon for an existing object
+ Bool scalable_addon;
+
+ //for a regular ODM, this indicates that the current scalable_odm associated
+ struct _od_manager *scalable_odm;
};
/*frame presentation time*/
u32 timestamp;
/*time in ms until next frame shall be presented*/
- u32 ms_until_next;
+ s32 ms_until_next;
s32 ms_until_pres;
/*data frame size*/
u32 framesize;
void gf_scene_generate_views(GF_Scene *scene, char *url, char *parent_url);
+void gf_scene_register_associated_media(GF_Scene *scene, GF_AssociatedContentLocation *addon_info);
+void gf_scene_notify_associated_media_timeline(GF_Scene *scene, GF_AssociatedContentTiming *addon_time);
+u32 gf_scene_adjust_time_for_addon(GF_Scene *scene, u32 clock_time, GF_AddonMedia *addon);
+u64 gf_scene_adjust_timestamp_for_addon(GF_Scene *scene, u64 orig_ts, GF_AddonMedia *addon);
+void gf_scene_select_scalable_addon(GF_Scene *scene, GF_ObjectManager *odm);
+
+struct _gf_addon_media
+{
+ char *url;
+ GF_ObjectManager *root_od;
+ s32 timeline_id;
+ u32 is_splicing;
+ //in scene time
+ Double activation_time;
+
+ Bool enabled;
+ Bool timeline_ready;
+
+ u32 media_timescale;
+ u64 media_timestamp;
+ u64 media_pts;
+
+ //0: not scalable
+ //1: layered coding scalable enhancement (reassembly before the decoder)
+ //2: view enhancement (reassembly after the decoder)
+ u32 scalable_type;
+};
+
GF_Err gf_codec_process_private_media(GF_Codec *codec, u32 TimeAvailable);
+
+Bool gf_codec_is_scene_or_image(GF_Codec *codec);
+
#ifdef __cplusplus
}
#endif
typedef struct
{
- /*0: ISMACryp - 1: OMA DRM - 2: CENC CTR - 3: CENC CBC*/
+ /*0: ISMACryp - 1: OMA DRM - 2: CENC CTR - 3: CENC CBC - 4: ADOBE*/
u32 enc_type;
u32 trackID;
unsigned char key[16];
u32 defaultKeyIdx;
u32 keyRoll;
+ char metadata[5000];
+ u32 metadata_len;
+
} GF_TrackCryptInfo;
#if !defined(GPAC_DISABLE_MCRYPT) && !defined(GPAC_DISABLE_ISOM_WRITE)
GF_Err gf_cbc_encrypt_track(GF_ISOFile *mp4, GF_TrackCryptInfo *tci, void (*progress)(void *cbk, u64 done, u64 total), void *cbk);
GF_Err gf_cbc_decrypt_track(GF_ISOFile *mp4, GF_TrackCryptInfo *tci, void (*progress)(void *cbk, u64 done, u64 total), void *cbk);
+/*ADOBE*/
+GF_Err gf_adobe_encrypt_track(GF_ISOFile *mp4, GF_TrackCryptInfo *tci, void (*progress)(void *cbk, u64 done, u64 total), void *cbk);
+GF_Err gf_adobe_decrypt_track(GF_ISOFile *mp4, GF_TrackCryptInfo *tci, void (*progress)(void *cbk, u64 done, u64 total), void *cbk);
+
GF_Err (*gf_encrypt_track)(GF_ISOFile *mp4, GF_TrackCryptInfo *tci, void (*progress)(void *cbk, u64 done, u64 total), void *cbk);
GF_Err (*gf_decrypt_track)(GF_ISOFile *mp4, GF_TrackCryptInfo *tci, void (*progress)(void *cbk, u64 done, u64 total), void *cbk);
GF_ISOM_CENC_SCHEME = GF_4CC('c','e','n','c'),
/* Encryption Scheme Type in the SchemeTypeInfoBox */
- GF_ISOM_CBC_SCHEME = GF_4CC('c','b','c','1')
+ GF_ISOM_CBC_SCHEME = GF_4CC('c','b','c','1'),
+
+ /* Encryption Scheme Type in the SchemeTypeInfoBox */
+ GF_ISOM_ADOBE_SCHEME = GF_4CC('a','d','k','m'),
};
0: not random access
1: regular RAP,
2: sample is a redundant RAP. If set when adding the sample, this will create a sample dependency entry
+ 3: specific RAP (CRA/BLA in HEVC)
*/
u8 IsRAP;
} GF_ISOSample;
GF_Err gf_isom_remove_samp_group_box(GF_ISOFile *the_file, u32 trackNumber);
GF_Err gf_isom_remove_pssh_box(GF_ISOFile *the_file);
+Bool gf_isom_is_adobe_protection_media(GF_ISOFile *the_file, u32 trackNumber, u32 sampleDescriptionIndex);
+GF_Err gf_isom_get_adobe_protection_info(GF_ISOFile *the_file, u32 trackNumber, u32 sampleDescriptionIndex, u32 *outOriginalFormat, u32 *outSchemeType, u32 *outSchemeVersion);
+GF_Err gf_isom_set_adobe_protection(GF_ISOFile *the_file, u32 trackNumber, u32 desc_index, u32 scheme_type, u32 scheme_version, Bool is_selective_enc, char *metadata, u32 len);
+
void gf_isom_ipmpx_remove_tool_list(GF_ISOFile *the_file);
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Err gf_isom_fragment_add_subsample(GF_ISOFile *movie, u32 TrackID, u32 subSampleSize, u8 priority, u32 reserved, Bool discardable);
/*copy over the subsample and sampleToGroup information of the given sample from the source track/file to the last sample added to the current track fragment of the destination file*/
-GF_Err gf_isom_fragment_copy_subsample(GF_ISOFile *dest, u32 TrackID, GF_ISOFile *orig, u32 track, u32 sampleNumber);
+GF_Err gf_isom_fragment_copy_subsample(GF_ISOFile *dest, u32 TrackID, GF_ISOFile *orig, u32 track, u32 sampleNumber, Bool sgpd_in_traf);
/*gets the number of the next moof to be produced*/
u32 gf_isom_get_next_moof_number(GF_ISOFile *movie);
#include <gpac/setup.h>
-/*NOTE: there is a conflict on Win32 VC6 with C++ and gpac headers when including <math.h>*/
-#if !defined(__cplusplus) || defined(__SYMBIAN32__)
#include <math.h>
-#endif
/*!
#ifndef GPAC_DISABLE_ISOM_WRITE
/*changes pixel aspect ratio for visual tracks if supported. Negative values remove any PAR info*/
GF_Err gf_media_change_par(GF_ISOFile *file, u32 track, s32 ar_num, s32 ar_den);
+GF_Err gf_media_remove_non_rap(GF_ISOFile *file, u32 track);
#endif
u32 nb_tracks;
/*track info after probing (GF_IMPORT_PROBE_ONLY set).*/
struct __track_import_info tk_info[GF_IMPORT_MAX_TRACKS];
+ u64 probe_duration;
/*for MPEG-TS and similar: program names*/
u32 nb_progs;
Bool segments_start_with_rap, Double dash_duration_sec, char *seg_rad_name, char *seg_ext, u32 segment_marker_4cc,
Double frag_duration_sec, s32 subsegs_per_sidx, Bool daisy_chain_sidx, Bool fragments_start_with_rap, const char *tmp_dir,
GF_Config *dash_ctx, u32 dash_dynamic, u32 mpd_update_time, u32 time_shift_depth, Double subduration, Double min_buffer,
- u32 ast_shift_sec, u32 dash_scale, Bool fragments_in_memory, u32 initial_moof_sn, u64 initial_tfdt, Bool no_fragments_defaults, Bool pssh_moof);
+ u32 ast_shift_sec, u32 dash_scale, Bool fragments_in_memory, u32 initial_moof_sn, u64 initial_tfdt, Bool no_fragments_defaults, Bool pssh_moof, Bool samplegroups_in_traf);
/*returns time to wait until end of currently generated segments*/
u32 gf_dasher_next_update_time(GF_Config *dash_ctx, u32 mpd_update_time);
/*fetch media data
*/
-char *gf_mo_fetch_data(GF_MediaObject *mo, Bool resync, Bool *eos, u32 *timestamp, u32 *size, s32 *ms_until_pres, u32 *ms_until_next);
+char *gf_mo_fetch_data(GF_MediaObject *mo, Bool resync, Bool *eos, u32 *timestamp, u32 *size, s32 *ms_until_pres, s32 *ms_until_next);
-/*release given amount of media data - nb_bytes is used for audio - if forceDrop is set, the unlocked frame will be
-droped if all bytes are consumed, otherwise it will be droped based on object time - typically, video fetches with the resync
-flag set and release without forceDrop, while audio fetches without resync but forces buffer drop. If forceDrop is set to 2,
-the frame will be stated as a discraded frame*/
-void gf_mo_release_data(GF_MediaObject *mo, u32 nb_bytes, s32 forceDrop);
+/*release given amount of media data - nb_bytes is used for audio - drop_mode can take the following values:
+-1: do not drop
+0: do not force drop: the unlocked frame will be dropped based on object time (typically video)
+1: force drop: the unlocked frame will be dropped if all bytes are consumed (typically audio)
+2: the frame will be stated as a discarded frame
+*/
+void gf_mo_release_data(GF_MediaObject *mo, u32 nb_bytes, s32 drop_mode);
/*get media time*/
void gf_mo_get_media_time(GF_MediaObject *mo, u32 *media_time, u32 *media_dur);
/*get object clock*/
GF_CODEC_LEVEL_SEEK
};
+
+/*codec resilience type*/
+enum
+{
+ GF_CODEC_NOT_RESILIENT=0,
+ GF_CODEC_RESILIENT_ALWAYS=1,
+ GF_CODEC_RESILIENT_AFTER_FIRST_RAP=2
+};
+
+
/*the structure for capabilities*/
typedef struct
{
/*size of a single composition unit */
GF_CODEC_OUTPUT_SIZE = 0x01,
/*resilency: if packets are lost within an AU, resilience means the AU won't be discarded and the codec
- will try to decode */
+ will try to decode
+ 0: not resilient
+ 1: resilient
+ 2: resilient after first rap
+ */
GF_CODEC_RESILIENT,
/*critical level of composition memory - if below, media management for the object */
GF_CODEC_BUFFER_MIN,
GF_CODEC_PAR,
/*video color mode - color modes are defined in constants.h*/
GF_CODEC_PIXEL_FORMAT,
- /*isgnal decoder performs frame re-ordering in temporal scalability*/
+ /*signal decoder performs frame re-ordering in temporal scalability*/
GF_CODEC_REORDER,
-
+ /*signal decoder can safely handle CTS when outputing a picture. If not supported by the
+ decoder, the terminal will automatically handle CTS adjustments*/
+ GF_CODEC_TRUSTED_CTS,
+
+ /*set cap only, indicates max bpp of display*/
+ GF_CODEC_DISPLAY_BPP,
+
/*Audio sample rate*/
GF_CODEC_SAMPLERATE,
/*Audio num channels*/
after the last AU). Otherwise the decoder will be stopped and ask to remove any extra scene being displayed*/
GF_CODEC_MEDIA_NOT_OVER,
- /*switches up or down media quality for scalable coding*/
+ /*switches up (1), max (2), down (0) or min (-1) media quality for scalable coding. */
GF_CODEC_MEDIA_SWITCH_QUALITY,
/*special cap indicating the codec should abort processing as soon as possible because it is about to be destroyed*/
} GF_BaseDecoder;
/*interface name and version for media decoder */
-#define GF_MEDIA_DECODER_INTERFACE GF_4CC('G', 'M', 'D', '2')
+#define GF_MEDIA_DECODER_INTERFACE GF_4CC('G', 'M', 'D', '3')
/*the media module interface. A media module MUST be implemented in synchronous mode as time
and resources management is done by the terminal*/
@mmlevel: speed indicator for the decoding - cf above for values*/
GF_Err (*ProcessData)(struct _mediadecoder *,
char *inBuffer, u32 inBufferLength,
- u16 ES_ID,
+ u16 ES_ID, u32 *CTS,
char *outBuffer, u32 *outBufferLength,
u8 PaddingBits, u32 mmlevel);
GF_NET_SERVICE_QUERY_INIT_RANGE,
/* When using proxy between an input module and the terminal, exchange status using this command: input -> proxy */
GF_NET_SERVICE_STATUS_PROXY,
- /*When using DASH or playlists, indicates that DATA (chunk) has been received by the playlist proxy*/
- GF_NET_SERVICE_PROXY_CHUNK_RECEIVE,
- /*When using DASH or playlists, indicates that complete segment has been received by the playlist proxy*/
- GF_NET_SERVICE_PROXY_SEGMENT_RECEIVE,
+
+ /*When using DASH or playlists, indicates that DATA (chunk or complete file ) has been received by the playlist proxy*/
+ GF_NET_SERVICE_PROXY_DATA_RECEIVE,
/*query screen capabilities*/
GF_NET_SERVICE_MEDIA_CAP_QUERY,
+ /*signal an associated content is announced (from service to term). This does not have to be filtered by the service
+ the terminal will handle this.*/
+ GF_NET_ASSOCIATED_CONTENT_LOCATION,
+ /*signal associated content timeline (from service to term)*/
+ GF_NET_ASSOCIATED_CONTENT_TIMING,
+
+ //sets nalu mode
+ GF_NET_CHAN_NALU_MODE,
} GF_NET_CHAN_CMD;
/*channel command for all commands that don't need params:
u32 padding_bytes;
} GF_NetComPadding;
+
+/*GF_NET_SERVICE_PROXY_DATA_RECEIVE*/
+typedef struct
+{
+ u32 command_type;
+ LPNETCHANNEL on_channel;
+ Bool is_chunk;
+ Bool is_live;
+} GF_NetComProxyData;
+
/*GF_NET_CHAN_MAP_TIME*/
typedef struct
{
LPNETCHANNEL channel;
u32 width;
u32 height;
- u32 bpp;
+ //max bits per color channel
+ u32 display_bit_depth;
+ u32 audio_bpp;
u32 channels;
u32 sample_rate;
const char *mime_query;
Bool mime_supported;
} GF_MediaCapQuery;
+
+/*GF_NET_ASSOCIATED_CONTENT_LOCATION*/
+typedef struct
+{
+ u32 command_type;
+ LPNETCHANNEL channel;
+
+ s32 timeline_id;
+ const char *external_URL;
+ Bool is_announce, is_splicing;
+ Bool reload_external;
+ Double activation_countdown;
+} GF_AssociatedContentLocation;
+
+/*GF_NET_ASSOCIATED_CONTENT_TIMING*/
+typedef struct
+{
+ u32 command_type;
+ LPNETCHANNEL channel;
+
+ u32 timeline_id;
+ u32 media_timescale;
+ u64 media_timestamp;
+ //for now only used in MPEG-2, so media_pts is in 90khz scale
+ u64 media_pts;
+ Bool force_reload;
+ Bool is_paused;
+ Bool is_discontinuity;
+} GF_AssociatedContentTiming;
+
+/*GF_NET_CHAN_NALU_MODE*/
+typedef struct
+{
+ u32 command_type;
+ LPNETCHANNEL channel;
+
+ //mode 0: extract in ISOBMF format (nalu size field + nalu)
+ //mode 1: extract in Annex B format (start code + nalu)
+ u32 extract_mode;
+} GF_NALUExtractMode;
+
typedef union __netcommand
{
GF_NET_CHAN_CMD command_type;
GF_NetQualitySwitch switch_quality;
GF_NetServiceStatus status;
GF_MediaCapQuery mcaps;
+ GF_NetComProxyData proxy_data;
+ GF_AssociatedContentLocation addon_info;
+ GF_AssociatedContentTiming addon_time;
+ GF_NALUExtractMode nalu_mode;
} GF_NetworkCommand;
/*
u32 max_screen_width, max_screen_height;
/* dpi of the screen*/
u32 dpi_x, dpi_y;
+ /* max bits per color channel*/
+ u32 max_screen_bpp;
/*overlay color key used by the hardware bliter - if not set, only top-level overlay can be used*/
u32 overlay_color_key;
GF_List *param_array;
- //set by libisomedia at impport/export time
+ //set by libisomedia at import/export time
Bool is_shvc;
//used in SHVC config
GF_M2TS_MPEG4_SL_DESCRIPTOR = 0x1E,
GF_M2TS_MPEG4_FMC_DESCRIPTOR = 0x1F,
/* ... */
+ GF_M2TS_METADATA_POINTER_DESCRIPTOR = 0x25,
+ GF_M2TS_METADATA_DESCRIPTOR = 0x26,
+ /* ... */
GF_M2TS_AVC_VIDEO_DESCRIPTOR = 0x28,
/* ... */
GF_M2TS_AVC_TIMING_HRD_DESCRIPTOR = 0x2A,
GF_M2TS_SYSTEMS_MPEG4_PES = 0x12,
GF_M2TS_SYSTEMS_MPEG4_SECTIONS = 0x13,
+ GF_M2TS_METADATA_PES = 0x15,
+
GF_M2TS_VIDEO_H264 = 0x1B,
GF_M2TS_VIDEO_SVC = 0x1F,
GF_M2TS_VIDEO_HEVC = 0x24,
GF_M2TS_DVB_TELETEXT = 0x152,
GF_M2TS_DVB_VBI = 0x153,
GF_M2TS_DVB_SUBTITLE = 0x154,
+ GF_M2TS_METADATA_ID3_HLS = 0x155,
};
/*skip pes processing: all transport packets related to this stream are discarded*/
GF_M2TS_PES_FRAMING_SKIP,
/*same as GF_M2TS_PES_FRAMING_SKIP but keeps internal PES buffer alive*/
- GF_M2TS_PES_FRAMING_SKIP_NO_RESET
+ GF_M2TS_PES_FRAMING_SKIP_NO_RESET,
+ /*same as default PES framing but forces nal-per-nal dispatch for AVC/HEVC (default mode may dispatch complete frames)*/
+ GF_M2TS_PES_FRAMING_DEFAULT_NAL,
};
/*PES packet flags*/
GF_M2TS_EVT_AIT_FOUND,
/*DSCM-CC has been found (carousel) */
GF_M2TS_EVT_DSMCC_FOUND,
+
+ /*a TEMI locator has been found or repeated*/
+ GF_M2TS_EVT_TEMI_LOCATION,
+ /*a TEMI timecode has been found*/
+ GF_M2TS_EVT_TEMI_TIMECODE,
+
GF_M2TS_EVT_EOS,
-
};
enum
gf_m2ts_section_callback process_section;
} GF_M2TS_SectionFilter;
+enum metadata_carriage {
+ METADATA_CARRIAGE_SAME_TS = 0,
+ METADATA_CARRIAGE_DIFFERENT_TS = 1,
+ METADATA_CARRIAGE_PS = 2,
+ METADATA_CARRIAGE_OTHER = 3
+};
+
+typedef struct tag_m2ts_metadata_pointer_descriptor {
+ u16 application_format;
+ u32 application_format_identifier;
+ u8 format;
+ u32 format_identifier;
+ u8 service_id;
+ Bool locator_record_flag;
+ u32 locator_length;
+ char *locator_data;
+ enum metadata_carriage carriage_flag;
+ u16 program_number;
+ u16 ts_location;
+ u16 ts_id;
+ char *data;
+ u32 data_size;
+} GF_M2TS_MetadataPointerDescriptor;
+
+typedef struct
+{
+ u32 timeline_id;
+ //for now we only support one URL announcement
+ const char *external_URL;
+ Bool is_announce, is_splicing;
+ Bool reload_external;
+ Double activation_countdown;
+} GF_M2TS_TemiLocationDescriptor;
+
+typedef struct
+{
+ u32 timeline_id;
+ u32 media_timescale;
+ u64 media_timestamp;
+ u64 pes_pts;
+ Bool force_reload;
+ Bool is_paused;
+ Bool is_discontinuity;
+} GF_M2TS_TemiTimecodeDescriptor;
/*MPEG-2 TS program object*/
u32 pid_playing;
Bool is_scalable;
+
+ GF_M2TS_MetadataPointerDescriptor *metadata_pointer_descriptor;
} GF_M2TS_Program;
/*ES flags*/
u8 page_number;
} GF_M2TS_DVB_Teletext_Descriptor;
+typedef struct tag_m2ts_metadata_descriptor {
+ u16 application_format;
+ u32 application_format_identifier;
+ u8 format;
+ u32 format_identifier;
+ u8 service_id;
+ u8 decoder_config_flags;
+ Bool dsmcc_flag;
+ u8 service_id_record_length;
+ char *service_id_record;
+ u8 decoder_config_length;
+ char *decoder_config;
+ u8 decoder_config_id_length;
+ char *decoder_config_id;
+ u8 decoder_config_service_id;
+} GF_M2TS_MetadataDescriptor;
+
/*MPEG-2 TS ES object*/
typedef struct tag_m2ts_pes
{
/*mpegts lib private - do not touch :)*/
/*PES re-assembler*/
- unsigned char *data;
+ unsigned char *pck_data;
+ /*amount of bytes allocated for data */
+ u32 pck_alloc_len;
/*amount of bytes received in the current PES packet (NOT INCLUDING ANY PENDING BYTES)*/
- u32 data_len;
- /*size of the PES packet being recevied*/
+ u32 pck_data_len;
+ /*size of the PES packet being received, as indicated in pes header length field - can be 0 if unknown*/
u32 pes_len;
Bool rap;
u64 PTS, DTS;
/*PES reframer - if NULL, pes processing is skiped*/
/*returns the number of bytes NOT consummed from the input data buffer - these bytes are kept when reassembling the next PES packet*/
- u32 (*reframe)(struct tag_m2ts_demux *ts, struct tag_m2ts_pes *pes, Bool same_pts, unsigned char *data, u32 data_len);
+ u32 (*reframe)(struct tag_m2ts_demux *ts, struct tag_m2ts_pes *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *hdr);
+ Bool single_nal_mode;
/*used by several reframers to store their parsing state*/
u32 frame_state;
/*LATM stuff - should be moved out of mpegts*/
u64 prev_PTS;
GF_M2TS_DVB_Subtitling_Descriptor sub;
+ GF_M2TS_MetadataDescriptor *metadata_descriptor;
+
+
+ char *temi_tc_desc;
+ u32 temi_tc_desc_len, temi_tc_desc_alloc_size;
} GF_M2TS_PES;
/*SDT information object*/
u32 data_len;
u32 flags;
u64 cts, dts;
+ u32 duration;
char *mpeg2_af_descriptors;
u32 mpeg2_af_descriptors_size;
u32 last_sys_clock;
u64 initial_ts;
Bool initial_ts_set;
+ Bool pcr_init_time_set;
u32 pcr_offset;
GF_Descriptor *iod;
/*function called when the a "set dirty" propagates to root node of the graph
ctxdata is not used*/
GF_SG_CALLBACK_GRAPH_DIRTY,
+ //node is being destroyed
+ GF_SG_CALLBACK_NODE_DESTROY,
};
/*set node callback: function called upon node creation.
{
GF_DOM_EVENT_TARGET_NODE,
GF_DOM_EVENT_TARGET_DOCUMENT,
- GF_DOM_EVENT_TARGET_HTML_MEDIA,
GF_DOM_EVENT_TARGET_MSE_MEDIASOURCE,
GF_DOM_EVENT_TARGET_MSE_SOURCEBUFFERLIST,
GF_DOM_EVENT_TARGET_MSE_SOURCEBUFFER,
BE CAREFULL: event execution may very well destroy ANY node, especially the event target node !!
*/
Bool gf_dom_event_fire(GF_Node *node, GF_DOM_Event *event);
-Bool sg_fire_dom_event(GF_DOMEventTarget *et, GF_DOM_Event *event, GF_SceneGraph *sg, GF_Node *n);
+Bool gf_sg_fire_dom_event(GF_DOMEventTarget *et, GF_DOM_Event *event, GF_SceneGraph *sg, GF_Node *n);
/*fires event on the specified node
BE CAREFULL: event execution may very well destroy ANY node, especially the event target node !!
#include <stdio.h>
#include <stdlib.h>
+#if defined(_WIN64) && !defined(GPAC_64_BITS)
+#define GPAC_64_BITS
+#endif
typedef unsigned __int64 u64;
typedef unsigned int u32;
/*version_number are pushed from m2ts sections to the mpeg4sl layer so as to handle mpeg4 stream dependencies*/
u8 m2ts_version_number_plus_one;
u8 m2ts_pcr;
+ /* HTML5 MSE Packet info */
+ s64 timeStampOffset;
} GF_SLHeader;
u32 db_unit_count;
/*number of CUs in composition memory (if any) and CM capacity*/
u16 cb_unit_count, cb_max_count;
+ /*indicates that the composition memory is bypassed for this decoder (video only) */
+ Bool direct_video_memory;
/*clock drift in ms of object clock: this is the delay set by the audio renderer to keep AV in sync*/
s32 clock_drift;
/*codec name*/
/*average birate over last second and max bitrate over one second at decoder input - expressed in bits per sec*/
u32 avg_bitrate, instant_bitrate, max_bitrate;
- u32 total_dec_time, max_dec_time, nb_dec_frames, nb_droped;
+ u32 nb_dec_frames, nb_droped;
u32 first_frame_time, last_frame_time;
+ u64 max_dec_time, total_dec_time;
/*set if ISMACryp present on the object - will need refinement for IPMPX...
0: not protected - 1: protected and OK - 2: protected and DRM failed*/
* \brief System clock query
*
* Gets the system clock time.
- * \return System clock value since initialization in milliseconds.
+ * \return System clock value since GPAC initialization in milliseconds.
*/
u32 gf_sys_clock();
+/*!
+ * \brief High precision system clock query
+ *
+ * Gets the high precision system clock time.
+ * \return System clock value since GPAC initialization in microseconds.
+ */
+u64 gf_sys_clock_high_res();
+
/*!
* \brief Sleeps thread/process
*
#define GPAC_VERSION_MICRO 0
#include <gpac/revision.h>
-#define GPAC_FULL_VERSION GPAC_VERSION"-rev"GPAC_SVN_REVISION
+#define GPAC_FULL_VERSION GPAC_VERSION "-rev" GPAC_SVN_REVISION
#endif //_GF_VERSION_H
endif
ifeq ($(DISABLE_SVG), no)
-PLUGDIRS+=laser_dec svg_in vtt_in
+PLUGDIRS+=laser_dec svg_in
+ifeq ($(DISABLE_TTXT), no)
+PLUGDIRS+=vtt_in
+endif
+
ifneq ($(CONFIG_ZLIB), no)
PLUGDIRS+=widgetman
ifeq ($(DISABLE_LOADER_BT),no)
static GF_Err FAAD_ProcessData(GF_MediaDecoder *ifcg,
char *inBuffer, u32 inBufferLength,
- u16 ES_ID,
+ u16 ES_ID, u32 *CTS,
char *outBuffer, u32 *outBufferLength,
u8 PaddingBits, u32 mmlevel)
{
if (read->needs_connection) {
read->needs_connection = 0;
- bs = gf_bs_new(read->data, read->data_size, GF_BITSTREAM_READ);
+ bs = gf_bs_new((char *) read->data, read->data_size, GF_BITSTREAM_READ);
sync = gf_ac3_parser_bs(bs, &hdr, 1);
gf_bs_del(bs);
if (!sync) return;
/*need a full ac3 header*/
if (read->data_size<=7) return;
- bs = gf_bs_new(read->data, read->data_size, GF_BITSTREAM_READ);
+ bs = gf_bs_new((char *) read->data, read->data_size, GF_BITSTREAM_READ);
hdr.framesize = 0;
pos = 0;
while (gf_ac3_parser_bs(bs, &hdr, 0)) {
read->sl_hdr.AU_sequenceNumber++;
read->sl_hdr.compositionTimeStampFlag = 1;
read->sl_hdr.compositionTimeStamp += 1536;
- gf_term_on_sl_packet(read->service, read->ch, read->data + pos, hdr.framesize, &read->sl_hdr, GF_OK);
+ gf_term_on_sl_packet(read->service, read->ch, (char *) read->data + pos, hdr.framesize, &read->sl_hdr, GF_OK);
gf_bs_skip_bytes(bs, hdr.framesize);
}
gf_bs_del(bs);
if (pos) {
- char *d;
+ u8 *d;
read->data_size -= (u32) pos;
d = gf_malloc(sizeof(char) * read->data_size);
memcpy(d, read->data + pos, sizeof(char) * read->data_size);
read->sl_hdr.compositionTimeStamp = read->current_time;
read->data = gf_malloc(sizeof(char) * (read->data_size+read->pad_bytes));
- gf_bs_read_data(bs, read->data, read->data_size);
+ gf_bs_read_data(bs, (char *) read->data, read->data_size);
if (read->pad_bytes) memset(read->data + read->data_size, 0, sizeof(char) * read->pad_bytes);
gf_bs_del(bs);
}
*out_sl_hdr = read->sl_hdr;
- *out_data_ptr = read->data;
+ *out_data_ptr =(char *) read->data;
*out_data_size = read->data_size;
return GF_OK;
}
static GF_Err AC3_ProcessData(GF_MediaDecoder *ifcg,
char *inBuffer, u32 inBufferLength,
- u16 ES_ID,
+ u16 ES_ID, u32 *CTS,
char *outBuffer, u32 *outBufferLength,
u8 PaddingBits, u32 mmlevel)
{
static GF_Err AMR_ProcessData(GF_MediaDecoder *ifcg,
char *inBuffer, u32 inBufferLength,
- u16 ES_ID,
+ u16 ES_ID, u32 *CTS,
char *outBuffer, u32 *outBufferLength,
u8 PaddingBits, u32 mmlevel)
{
static GF_Err AMR_ProcessData(GF_MediaDecoder *ifcg,
char *inBuffer, u32 inBufferLength,
- u16 ES_ID,
+ u16 ES_ID, u32 *CTS,
char *outBuffer, u32 *outBufferLength,
u8 PaddingBits, u32 mmlevel)
{
}
fclose(t);
}
- /*remap to remote URL*/
- remote = gf_strdup(mux->file_name);
+ /*remap to remote URL - warning, the URL has already been resolved according to the parent path*/
+ remote = gf_malloc(sizeof(char) * (strlen("gpac://")+strlen(mux->file_name)+1) );
+ strcpy(remote, "gpac://");
+ strcat(remote, mux->file_name);
k = od->objectDescriptorID;
/*if files were created we'll have to clean up (swf import)*/
if (mux->delete_file) gf_list_add(priv->files_to_delete, gf_strdup(remote));
}
}
-
-static void write_yv12_10_to_yuv(GF_VideoSurface *vs, unsigned char *pY, u32 src_stride, u32 src_pf,
- u32 src_width, u32 src_height, const GF_Window *src_wnd, unsigned char *pU, unsigned char*pV)
-{
- u32 i, j;
- if (!pU) {
- pU = pY + src_stride * src_height;
- pV = pY + 5*src_stride * src_height/4;
- }
-
- pY = pY + src_stride * src_wnd->y + src_wnd->x;
- /*because of U and V downsampling by 2x2, working with odd Y offset will lead to a half-line shift between Y and UV components. We
- therefore force an even Y offset for U and V planes.*/
- pU = pU + (src_stride * (src_wnd->y / 2) + src_wnd->x) / 2;
- pV = pV + (src_stride * (src_wnd->y / 2) + src_wnd->x) / 2;
-
- if (vs->pixel_format == GF_PIXEL_YV12) {
- for (i=0; i<src_wnd->h; i++) {
- u16 *src = (u16 *) (pY + i*src_stride);
- u8 *dst = vs->video_buffer + i*vs->pitch_y;
-
- for (j=0; j<src_wnd->w;j++) {
- *dst = (*src) >> 2;
- dst++;
- src++;
- }
- }
-
- for (i=0; i<src_wnd->h/2; i++) {
- u16 *src = (u16 *) (pV + i*src_stride/2);
- u8 *dst = vs->video_buffer + vs->pitch_y * vs->height + i*vs->pitch_y/2;
-
- for (j=0; j<src_wnd->w/2;j++) {
- *dst = (*src) >> 2;
- dst++;
- src++;
- }
- }
-
- for (i=0; i<src_wnd->h/2; i++) {
- u16 *src = (u16 *) (pU + i*src_stride/2);
- u8 *dst = vs->video_buffer + 5*vs->pitch_y * vs->height/4 + i*vs->pitch_y/2;
-
- for (j=0; j<src_wnd->w/2;j++) {
- *dst = (*src) >> 2;
- dst++;
- src++;
- }
- }
- }
-}
-
static void write_yvyu_to_yuv(GF_VideoSurface *vs, unsigned char *src, u32 src_stride, u32 src_pf,
u32 src_width, u32 src_height, const GF_Window *src_wnd)
{
} else if (get_yuv_base(src_s->pixel_format)==GF_PIXEL_YV12_10) {
if (format_is_yuv(dst_s->pixel_format)) {
/*generic YV planar to YUV (planar or not) */
- write_yv12_10_to_yuv(dst_s, src_s->video_buffer, src_s->pitch_y, src_s->pixel_format, src_s->width, src_s->height, src_wnd, src_s->u_ptr, src_s->v_ptr);
+ gf_color_write_yv12_10_to_yuv(dst_s, src_s->video_buffer, src_s->u_ptr, src_s->v_ptr, src_s->pitch_y, src_s->width, src_s->height, src_wnd);
return;
}
} else if (format_is_yuv(src_s->pixel_format)) {
DDSURFDESC ddsd;
DDPIXELFORMAT pixelFmt;
LPDIRECTDRAWCLIPPER pcClipper;
- const char *opt;
DDCONTEXT;
if (!dd->cur_hwnd || !Width || !Height || !dd->DirectDrawCreate) return GF_BAD_PARAM;
DestroyObjects(dd);
pcClipper->lpVtbl->Release(pcClipper);
- opt = gf_modules_get_option((GF_BaseInterface *)dr, "Video", "DisableVSync");
- if (opt && !strcmp(opt, "yes")) dd->disable_vsync = GF_TRUE;
-
dd->ddraw_init = 1;
/*if YUV not initialize, init using HW video memory to setup HW caps*/
return GF_OK;
- //CreateBackBuffer(dr, Width, Height, dd->yuv_init);
}
static GF_Err DD_LockSurface(DDContext *dd, GF_VideoSurface *vi, LPDDRAWSURFACE surface)
typedef HDC (APIENTRY *RELEASEPBUFFERDCARB)(void *pb, HDC dc);
static RELEASEPBUFFERDCARB wglReleasePbufferDCARB = NULL;
+typedef BOOL (APIENTRY *PFNWGLSWAPINTERVALFARPROC)( int );
+PFNWGLSWAPINTERVALFARPROC wglSwapIntervalEXT = NULL;
+
static void dd_init_gl_offscreen(GF_VideoOutput *driv)
{
const char *opt;
return DD_SetupOpenGL(dr, offscreen_width, offscreen_height);
}
+ dr->max_screen_bpp = dd->bpp;
+
if (wglGetPixelFormatAttribivARB) {
int rb, gb, bb, att;
rb = gb = bb = 0;
dd_init_gl_offscreen(dr);
}
+ if (dd->disable_vsync) {
+ if (!wglSwapIntervalEXT) {
+ wglSwapIntervalEXT = (PFNWGLSWAPINTERVALFARPROC)wglGetProcAddress( "wglSwapIntervalEXT" );
+ }
+ if (wglSwapIntervalEXT) {
+ wglSwapIntervalEXT(0);
+ }
+ }
if ((dd->output_3d_type!=2) || dd->gl_hwnd) {
if (!wglMakeCurrent(dd->gl_HDC, dd->gl_HRC)) return GF_IO_ERR;
{
RECT rc;
DDCONTEXT
+ const char *opt;
dd->os_hwnd = (HWND) os_handle;
DD_SetupWindow(dr, init_flags);
dd->output_3d_type = 0;
#endif
GetWindowRect(dd->cur_hwnd, &rc);
-// return InitDirectDraw(dr, rc.right - rc.left, rc.bottom - rc.top);
+
+ opt = gf_modules_get_option((GF_BaseInterface *)dr, "Video", "DisableVSync");
+ if (opt && !strcmp(opt, "yes")) dd->disable_vsync = GF_TRUE;
+
return GF_OK;
}
u32 MaxWidth, MaxHeight;
DDCONTEXT;
- if (!dd->width ||!dd->height) return GF_BAD_PARAM;
if (bOn == dd->fullscreen) return GF_OK;
if (!dd->fs_hwnd) return GF_NOT_SUPPORTED;
+
dd->fullscreen = bOn;
+
+ if (!dd->width ||!dd->height) return GF_OK;
/*whenever changing card display mode relocate fastest YUV format for blit (since it depends
on the dest pixel format)*/
dd->fs_width = MaxWidth;
dd->fs_height = MaxHeight;
}
- SetWindowPos(dd->cur_hwnd, NULL, X, Y, dd->fs_width, dd->fs_height, SWP_NOZORDER | SWP_SHOWWINDOW | SWP_ASYNCWINDOWPOS);
-
-//#ifndef _WIN32_WCE
- /*commented out since it causes problem on multiple monitors*/
-#if 0
- {
- DEVMODE settings;
-
- memset(&settings, 0, sizeof(DEVMODE));
- settings.dmSize = sizeof(DEVMODE);
- settings.dmPelsWidth = dd->fs_width;
- settings.dmPelsHeight = dd->fs_height;
- settings.dmFields = DM_PELSWIDTH | DM_PELSHEIGHT;
-
- if ( ChangeDisplaySettings(&settings, CDS_FULLSCREEN) != DISP_CHANGE_SUCCESSFUL ) {
- GF_LOG(GF_LOG_ERROR, GF_LOG_MMIO, ("[DirectDraw] cannot change display settings\n"));
- e = GF_IO_ERR;
- }
- }
- dd->NeedRestore = 1;
-#endif
+ SetWindowPos(dd->cur_hwnd, NULL, X, Y, dd->fs_width, dd->fs_height, SWP_SHOWWINDOW | SWP_NOZORDER /*| SWP_ASYNCWINDOWPOS*/);
dd->fs_store_width = dd->fs_width;
dd->fs_store_height = dd->fs_height;
} else if (dd->os_hwnd==dd->fs_hwnd) {
- SetWindowPos(dd->os_hwnd, NULL, 0, 0, dd->store_width+dd->off_w, dd->store_height+dd->off_h, SWP_NOZORDER | SWP_NOMOVE | SWP_ASYNCWINDOWPOS);
+ SetWindowPos(dd->os_hwnd, NULL, 0, 0, dd->store_width+dd->off_w, dd->store_height+dd->off_h, SWP_NOMOVE | SWP_NOZORDER /*| SWP_ASYNCWINDOWPOS*/);
}
if (!e) e = DD_SetupOpenGL(dr, 0, 0);
driv->max_screen_width = GetSystemMetrics(SM_CXSCREEN);
driv->max_screen_height = GetSystemMetrics(SM_CYSCREEN);
+ driv->max_screen_bpp = 8;
driv->hw_caps = GF_VIDEO_HW_OPENGL | GF_VIDEO_HW_OPENGL_OFFSCREEN | GF_VIDEO_HW_OPENGL_OFFSCREEN_ALPHA | GF_VIDEO_HW_HAS_HWND_HDC;
DD_SetupDDraw(driv);
/*create event thread*/
ctx->th = gf_th_new("DirectX Video");
gf_th_run(ctx->th, DD_WindowThread, dr);
- while (!ctx->th_state) gf_sleep(2);
+ while (!ctx->th_state)
+ gf_sleep(1);
}
if (!the_video_output) the_video_output = dr;
}
static GF_Err EDEC_ProcessData(GF_MediaDecoder *ifcg,
char *inBuffer, u32 inBufferLength,
- u16 ES_ID,
+ u16 ES_ID, u32 *CTS,
char *outBuffer, u32 *outBufferLength,
u8 PaddingBits, u32 mmlevel)
{
* \param size Size to allocate (will use extra padding for real size)
* \return The newly allocated buffer
*/
-static char * ffmpeg_realloc_buffer(char * oldBuffer, u32 size){
- char * buffer;
+static uint8_t * ffmpeg_realloc_buffer(uint8_t * oldBuffer, u32 size){
+ uint8_t * buffer;
/* Size of buffer must be larger, see avcodec_decode_video2 documentation */
u32 allocatedSz = sizeof( char ) * (FF_INPUT_BUFFER_PADDING_SIZE + size);
if (oldBuffer)
gf_free(ctx->extradata);
ctx->extradata_size = dsi_size;
ctx->extradata = ffmpeg_realloc_buffer(ctx->extradata, ctx->extradata_size);
- gf_bs_read_data(bs, ctx->extradata, ctx->extradata_size);
+ gf_bs_read_data(bs, (char *) ctx->extradata, ctx->extradata_size);
return;
}
gf_free(ctx->extradata);
ctx->extradata_size = 0x5a + size;
ctx->extradata = ffmpeg_realloc_buffer(ctx->extradata, ctx->extradata_size);
- strcpy(ctx->extradata, "SVQ3");
- gf_bs_read_data(bs, (unsigned char *)ctx->extradata + 0x5a, size);
+ strcpy((char *) ctx->extradata, "SVQ3");
+ gf_bs_read_data(bs, (char *)ctx->extradata + 0x5a, size);
}
}
break;
gf_free(ctx->extradata);
ctx->extradata_size = dsi_size;
ctx->extradata = ffmpeg_realloc_buffer(ctx->extradata, ctx->extradata_size);
- gf_bs_read_data(bs, ctx->extradata, ctx->extradata_size);
+ gf_bs_read_data(bs, (char *)ctx->extradata, ctx->extradata_size);
break;
}
}
AVCodecContext **ctx;
AVCodec **codec;
AVFrame **frame;
+ const char *sOpt;
#ifndef GPAC_DISABLE_AV_PARSERS
GF_M4VDecSpecInfo dsi;
/*ffmpeg specific*/
(*ctx)->block_align = gf_bs_read_u16(bs);
+ (*ctx)->bit_rate = gf_bs_read_u32(bs);
+ (*ctx)->codec_tag = gf_bs_read_u32(bs);
} else if (ffd->st==GF_STREAM_VISUAL) {
(*ctx)->codec_type = AVMEDIA_TYPE_VIDEO;
(*ctx)->width = gf_bs_read_u16(bs);
(*ctx)->height = gf_bs_read_u16(bs);
+ (*ctx)->bit_rate = gf_bs_read_u32(bs);
+ (*ctx)->codec_tag = gf_bs_read_u32(bs);
+ ffd->raw_pix_fmt = gf_bs_read_u32(bs);
}
- (*ctx)->bit_rate = gf_bs_read_u32(bs);
- (*ctx)->codec_tag = gf_bs_read_u32(bs);
- ffd->raw_pix_fmt = gf_bs_read_u32(bs);
*codec = avcodec_find_decoder(codec_id);
FFDEC_LoadDSI(ffd, bs, *codec, *ctx, 1);
if (ffd->oti == GPAC_OTI_VIDEO_HEVC) {
GF_SystemRTInfo rti;
u32 nb_threads, detected_nb_threads = 1;
- const char *sOpt = gf_modules_get_option((GF_BaseInterface *)plug, "OpenHEVC", "ThreadingType");
+ sOpt = gf_modules_get_option((GF_BaseInterface *)plug, "OpenHEVC", "ThreadingType");
if (sOpt && !strcmp(sOpt, "wpp")) av_opt_set(*ctx, "thread_type", "slice", 0);
else if (sOpt && !strcmp(sOpt, "frame+wpp")) av_opt_set(*ctx, "thread_type", "frameslice", 0);
else {
sOpt = gf_modules_get_option((GF_BaseInterface *)plug, "OpenHEVC", "NumThreads");
if (!sOpt) {
char szO[100];
- //checkme I have perf using too many threads
- if (detected_nb_threads > 6) detected_nb_threads = 6;
sprintf(szO, "%d", detected_nb_threads);
gf_modules_set_option((GF_BaseInterface *)plug, "OpenHEVC", "NumThreads", szO);
nb_threads = detected_nb_threads;
nb_threads = atoi(sOpt);
}
if (nb_threads > detected_nb_threads) {
- GF_LOG(GF_LOG_CODEC, GF_LOG_WARNING, ("[OpenHEVC] Initializing with %d threads but only %d available cores detected on the system\n", nb_threads, rti.nb_cores));
+ GF_LOG(GF_LOG_CODEC, GF_LOG_WARNING, ("[HEVC@ffmpeg] Initializing with %d threads but only %d available cores detected on the system\n", nb_threads, rti.nb_cores));
} else {
- GF_LOG(GF_LOG_CODEC, GF_LOG_INFO, ("[OpenHEVC] Initializing with %d threads\n", nb_threads));
+ GF_LOG(GF_LOG_CODEC, GF_LOG_INFO, ("[HEVC@ffmpeg] Initializing with %d threads\n", nb_threads));
}
- fprintf(stderr, "[OpenHEVC] Initializing with %d threads\n", nb_threads);
+ fprintf(stderr, "[HEVC@ffmpeg] Initializing with %d threads\n", nb_threads);
av_opt_set_int(*ctx, "threads", nb_threads, 0);
/* Set the decoder id */
//av_opt_set_int(openHevcContext->c->priv_data, "decoder-id", i, 0);
+ sOpt = gf_modules_get_option((GF_BaseInterface *)plug, "OpenHEVC", "CBUnits");
+ if (!sOpt) gf_modules_set_option((GF_BaseInterface *)plug, "OpenHEVC", "CBUnits", "4");
+ if (sOpt) ffd->output_cb_size = atoi(sOpt);
}
#endif //HAS_HEVC
+ if (!ffd->output_cb_size) ffd->output_cb_size = 4;
+
if (codec_id == CODEC_ID_RAWVIDEO) {
(*ctx)->codec_id = CODEC_ID_RAWVIDEO;
(*ctx)->pix_fmt = ffd->raw_pix_fmt;
- if ((*ctx)->extradata && strstr((*ctx)->extradata, "BottomUp")) ffd->flipped = 1;
+ if ((*ctx)->extradata && strstr((char *) (*ctx)->extradata, "BottomUp")) ffd->flipped = 1;
} else {
#ifdef USE_AVCTX3
if (avcodec_open2((*ctx), (*codec), NULL )<0) return GF_NON_COMPLIANT_BITSTREAM;
{
AVPacket pkt;
av_init_packet(&pkt);
- pkt.data = esd->decoderConfig->decoderSpecificInfo->data;
+ pkt.data = (uint8_t *) esd->decoderConfig->decoderSpecificInfo->data;
pkt.size = esd->decoderConfig->decoderSpecificInfo->dataLength;
avcodec_decode_video2((*ctx), *frame, &gotpic, &pkt);
}
}
+ sOpt = gf_modules_get_option((GF_BaseInterface *)plug, "Systems", "Output8bit");
+ if (!sOpt) gf_modules_set_option((GF_BaseInterface *)plug, "Systems", "Output8bit", (ffd->display_bpp>8) ? "no" : "yes");
+ if (sOpt && !strcmp(sOpt, "yes")) ffd->output_as_8bit = 1;
+
+ if (ffd->output_as_8bit && (ffd->stride > (u32) (*ctx)->width)) {
+ ffd->stride /=2;
+ ffd->out_size /= 2;
+ ffd->conv_to_8bit = 1;
+ }
+
return GF_OK;
}
*sws = NULL;
}
#endif
+ if (ffd->conv_buffer) {
+ gf_free(ffd->conv_buffer);
+ ffd->conv_buffer = NULL;
+ }
return GF_OK;
}
capability->cap.valueInt = 1;
return GF_OK;
case GF_CODEC_DIRECT_OUTPUT:
- capability->cap.valueBool = /*GF_TRUE*/GF_FALSE;
+ capability->cap.valueBool = GF_TRUE;
return GF_OK;
+ case GF_CODEC_WANTS_THREAD:
+ capability->cap.valueBool= GF_TRUE;
+ break;
}
if (!ffd->base_ctx) {
break;
case GF_CODEC_BUFFER_MAX:
/*for audio let the systems engine decide since we may have very large block size (1 sec with some QT movies)*/
- capability->cap.valueInt = (ffd->st==GF_STREAM_AUDIO) ? 0 : (ffd->is_image ? 1 : 4);
+ capability->cap.valueInt = (ffd->st==GF_STREAM_AUDIO) ? 0 : (ffd->is_image ? 1 : ffd->output_cb_size);
break;
/*by default AAC access unit lasts num_samples (timescale being sampleRate)*/
case GF_CODEC_CU_DURATION:
capability->cap.valueInt = ffd->base_ctx->height;
break;
case GF_CODEC_STRIDE:
- capability->cap.valueInt = ffd->stride;
- if (ffd->out_pix_fmt==GF_PIXEL_RGB_24) capability->cap.valueInt *= 3;
+ if (ffd->out_pix_fmt==GF_PIXEL_RGB_24)
+ capability->cap.valueInt = ffd->stride*3;
+ else if (ffd->conv_buffer)
+ capability->cap.valueInt = ffd->base_ctx->width;
+ else
+ capability->cap.valueInt = ffd->stride;
break;
case GF_CODEC_FPS:
capability->cap.valueFloat = 30.0f;
break;
case GF_CODEC_PIXEL_FORMAT:
if (ffd->base_ctx->width) capability->cap.valueInt = ffd->out_pix_fmt;
+ if (ffd->conv_buffer) capability->cap.valueInt = GF_PIXEL_YV12;
break;
/*ffmpeg performs frame reordering internally*/
case GF_CODEC_REORDER:
assert(plug);
assert( ffd );
switch (capability.CapCode) {
+ case GF_CODEC_DISPLAY_BPP:
+ ffd->display_bpp = capability.cap.valueInt;
+ return GF_OK;
case GF_CODEC_WAIT_RAP:
ffd->frame_start = 0;
if (ffd->st==GF_STREAM_VISUAL) {
static GF_Err FFDEC_ProcessData(GF_MediaDecoder *plug,
char *inBuffer, u32 inBufferLength,
- u16 ES_ID,
+ u16 ES_ID, u32 *CTS,
char *outBuffer, u32 *outBufferLength,
u8 PaddingBits, u32 mmlevel)
{
#endif
AVPicture pict;
u32 pix_out;
- s32 w, h, gotpic;
+ s32 w, h, gotpic, stride;
u32 outsize;
AVCodecContext *ctx;
AVCodec **codec;
#ifdef USE_AVCODEC2
av_init_packet(&pkt);
- pkt.data = inBuffer;
+ pkt.data = (uint8_t *)inBuffer;
pkt.size = inBufferLength;
#endif
/*audio stream*/
if (ffd->audio_frame->format==AV_SAMPLE_FMT_FLTP) {
s32 i, j;
s16 *output = (s16 *) outBuffer;
- for (i=0 ; i<ffd->audio_frame->nb_samples ; i++) {
- for (j=0; j<ctx->channels; j++) {
- Float* inputChannel = (Float*)ffd->audio_frame->extended_data[j];
+ for (j=0; j<ctx->channels; j++) {
+ Float* inputChannel = (Float*)ffd->audio_frame->extended_data[j];
+ for (i=0 ; i<ffd->audio_frame->nb_samples ; i++) {
Float sample = inputChannel[i];
if (sample<-1.0f) sample=-1.0f;
else if (sample>1.0f) sample=1.0f;
- output[i*ctx->channels + j] = (int16_t) (sample * GF_SHORT_MAX);
+ output[i*ctx->channels + j] = (int16_t) (sample * GF_SHORT_MAX );
}
}
} else {
if (ffd->raw_pix_fmt==PIX_FMT_BGR24) {
s32 i, j;
for (j=0; j<ctx->height; j++) {
- u8 *src = inBuffer + j*3*ctx->width;
- u8 *dst = outBuffer + j*3*ctx->width;
+ u8 *src = (u8 *) inBuffer + j*3*ctx->width;
+ u8 *dst = (u8 *)outBuffer + j*3*ctx->width;
if (ffd->flipped) {
- dst = outBuffer + (ctx->height-j-1) * 3*ctx->width;
+ dst = (u8 *)outBuffer + (ctx->height-j-1) * 3*ctx->width;
}
for (i=0; i<ctx->width; i++) {
dst[0] = src[2];
return GF_BUFFER_TOO_SMALL;
}
-
+ stride = frame->linesize[0];
+#ifndef NO_10bit
+ if ((ctx->pix_fmt == PIX_FMT_YUV420P10LE) && ffd->output_as_8bit && (frame->linesize[0] >= 2*w) ) {
+ ffd->conv_to_8bit = 1;
+ stride=w;
+ }
+#endif
+
/*recompute outsize in case on-the-fly change*/
if ((w != ctx->width) || (h != ctx->height)
- || (ffd->direct_output && (frame->linesize[0] != ffd->stride))
- || (ffd->out_pix_fmt==GF_PIXEL_YV12 && (ctx->pix_fmt != PIX_FMT_YUV420P))
- ) {
+ || (ffd->direct_output && (stride != ffd->stride))
+ || ((ffd->out_pix_fmt==GF_PIXEL_YV12) && (ctx->pix_fmt != PIX_FMT_YUV420P) && !ffd->output_as_8bit )
+ //need to realloc the conversion buffer
+ || (ffd->conv_to_8bit && !ffd->conv_buffer && ffd->direct_output)
+ ) {
- ffd->stride = ffd->direct_output ? frame->linesize[0] : ctx->width;
+ ffd->stride = (!ffd->conv_to_8bit && ffd->direct_output) ? frame->linesize[0] : ctx->width;
if (ffd->out_pix_fmt == GF_PIXEL_RGB_24) {
outsize = ctx->width * ctx->height * 3;
}
#ifndef NO_10bit
//this YUV format is handled natively in GPAC
- else if (ctx->pix_fmt == PIX_FMT_YUV420P10LE) {
- ffd->stride = 2* ctx->width;
+ else if ((ctx->pix_fmt == PIX_FMT_YUV420P10LE) && !ffd->output_as_8bit) {
+ ffd->stride = ffd->direct_output ? frame->linesize[0] : ctx->width*2;
outsize = ffd->stride * ctx->height * 3 / 2;
ffd->out_pix_fmt = GF_PIXEL_YV12_10;
}
}
#endif
ffd->had_pic = 1;
+
+ if (ffd->conv_to_8bit && ffd->direct_output) {
+ ffd->conv_buffer = gf_realloc(ffd->conv_buffer, sizeof(char)*ffd->out_size);
+ }
+
return GF_BUFFER_TOO_SMALL;
}
/*check PAR in case on-the-fly change*/
}
#endif
- if (ffd->direct_output) {
+ if (ffd->direct_output && !ffd->conv_to_8bit) {
*outBufferLength = ffd->out_size;
return GF_OK;
}
+ if (ffd->conv_to_8bit) {
+ GF_VideoSurface dst;
+ memset(&dst, 0, sizeof(GF_VideoSurface));
+ dst.width = ctx->width;
+ dst.height = ctx->height;
+ dst.pitch_y = ctx->width;
+ dst.video_buffer = ffd->direct_output ? ffd->conv_buffer : outBuffer;
+ dst.pixel_format = GF_PIXEL_YV12;
+
+ gf_color_write_yv12_10_to_yuv(&dst, (u8 *) frame->data[0], frame->data[1], frame->data[2], frame->linesize[0], ctx->width, ctx->height, NULL);
+ *outBufferLength = ffd->out_size;
+ return GF_OK;
+ }
+
+
if (ES_ID == ffd->depth_ES_ID) {
s32 i;
u8 *pYO, *pYD;
pYO = frame->data[0];
- pYD = outBuffer+ffd->yuv_size;
+ pYD = (u8 *) outBuffer+ffd->yuv_size;
for (i=0; i<ctx->height; i++) {
memcpy(pYD, pYO, sizeof(char) * ctx->width);
pYD += ctx->width;
memset(&pict, 0, sizeof(pict));
if (ffd->out_pix_fmt==GF_PIXEL_RGB_24) {
- pict.data[0] = outBuffer;
+ pict.data[0] = (uint8_t *)outBuffer;
pict.linesize[0] = 3*ctx->width;
pix_out = PIX_FMT_RGB24;
} else {
- pict.data[0] = outBuffer;
- pict.data[1] = outBuffer + ffd->stride * ctx->height;
- pict.data[2] = outBuffer + 5 * ffd->stride * ctx->height / 4;
+ pict.data[0] = (uint8_t *)outBuffer;
+ pict.data[1] = (uint8_t *)outBuffer + ffd->stride * ctx->height;
+ pict.data[2] = (uint8_t *)outBuffer + 5 * ffd->stride * ctx->height / 4;
pict.linesize[0] = ffd->stride;
pict.linesize[1] = pict.linesize[2] = ffd->stride/2;
pix_out = PIX_FMT_YUV420P;
FFDec *ffd = ifcg->privateStack;
AVFrame *frame;
+
+ if (ffd->conv_buffer) {
+ *pY_or_RGB = (u8 *) ffd->conv_buffer;
+ *pU = (u8 *) ffd->conv_buffer + ffd->stride * ffd->base_ctx->height;
+ *pV = (u8 *) ffd->conv_buffer + 5*ffd->stride * ffd->base_ctx->height/4;
+ return GF_OK;
+ }
+
if (ES_ID && (ffd->depth_ES_ID==ES_ID)) {
frame = ffd->depth_frame;
*pY_or_RGB = frame->data[0];
{
AVPacket pkt;
s64 seek_to;
- u64 seek_audio, seek_video;
- Bool video_init, do_seek, map_audio_time, map_video_time;
GF_NetworkCommand com;
GF_NetworkCommand map;
GF_SLHeader slh;
map.command_type = GF_NET_CHAN_MAP_TIME;
memset(&com, 0, sizeof(GF_NetworkCommand));
- com.command_type = GF_NET_CHAN_BUFFER_QUERY;
+ com.command_type = GF_NET_BUFFER_QUERY;
memset(&slh, 0, sizeof(GF_SLHeader));
-
+
slh.compositionTimeStampFlag = slh.decodingTimeStampFlag = 1;
- seek_to = (s64) (AV_TIME_BASE*ffd->seek_time);
- map_video_time = !ffd->seekable;
-
- video_init = (seek_to && ffd->video_ch) ? GF_FALSE : GF_TRUE;
- seek_audio = seek_video = 0;
- if (ffd->seekable && (ffd->audio_st>=0)) seek_audio = (u64) (s64) (ffd->seek_time*ffd->audio_tscale.den);
- if (ffd->seekable && (ffd->video_st>=0)) seek_video = (u64) (s64) (ffd->seek_time*ffd->video_tscale.den);
-
- /*it appears that ffmpeg has trouble resyncing on some mpeg files - we trick it by restarting to 0 to get the
- first video frame, and only then seek*/
- if (ffd->seekable) av_seek_frame(ffd->ctx, -1, video_init ? seek_to : 0, AVSEEK_FLAG_BACKWARD);
- do_seek = !video_init;
- map_audio_time = video_init ? ffd->unreliable_audio_timing : 0;
- gf_sleep(1000);
while (ffd->is_running) {
+ if ((!ffd->video_ch && (ffd->video_st>=0)) || (!ffd->audio_ch && (ffd->audio_st>=0))) {
+ gf_sleep(100);
+ continue;
+ }
+ if ((ffd->seek_time>=0) && ffd->seekable) {
+ seek_to = (s64) (AV_TIME_BASE*ffd->seek_time);
+ av_seek_frame(ffd->ctx, -1, seek_to, AVSEEK_FLAG_BACKWARD);
+ ffd->seek_time = -1;
+ }
pkt.stream_index = -1;
/*EOF*/
if (av_read_frame(ffd->ctx, &pkt) <0) break;
gf_mx_p(ffd->mx);
/*blindly send audio as soon as video is init*/
- if (ffd->audio_ch && (pkt.stream_index == ffd->audio_st) && !do_seek) {
+ if (ffd->audio_ch && (pkt.stream_index == ffd->audio_st) ) {
+// u64 seek_audio = ffd->seek_time ? (u64) (s64) (ffd->seek_time*ffd->audio_tscale.den) : 0;
slh.compositionTimeStamp *= ffd->audio_tscale.num;
slh.decodingTimeStamp *= ffd->audio_tscale.num;
- if (map_audio_time) {
- map.base.on_channel = ffd->audio_ch;
- map.map_time.media_time = ffd->seek_time;
- /*mapwith TS=0 since we don't use SL*/
- map.map_time.timestamp = 0;
- map.map_time.reset_buffers = 1;
- map_audio_time = 0;
- gf_term_on_command(ffd->service, &map, GF_OK);
- }
- else if (slh.compositionTimeStamp < seek_audio) {
+#if 0
+ if (slh.compositionTimeStamp < seek_audio) {
slh.decodingTimeStamp = slh.compositionTimeStamp = seek_audio;
}
- gf_term_on_sl_packet(ffd->service, ffd->audio_ch, pkt.data, pkt.size, &slh, GF_OK);
+#endif
+ gf_term_on_sl_packet(ffd->service, ffd->audio_ch, (char *) pkt.data, pkt.size, &slh, GF_OK);
}
else if (ffd->video_ch && (pkt.stream_index == ffd->video_st)) {
+// u64 seek_video = ffd->seek_time ? (u64) (s64) (ffd->seek_time*ffd->video_tscale.den) : 0;
slh.compositionTimeStamp *= ffd->video_tscale.num;
slh.decodingTimeStamp *= ffd->video_tscale.num;
- /*if we get pts = 0 after a seek the demuxer is reseting PTSs, so force map time*/
- if ((!do_seek && seek_to && !slh.compositionTimeStamp) || (map_video_time) ) {
- seek_to = 0;
- map_video_time = 0;
-
- map.base.on_channel = ffd->video_ch;
- map.map_time.timestamp = (u64) pkt.pts;
-// map.map_time.media_time = ffd->seek_time;
- map.map_time.media_time = 0;
- map.map_time.reset_buffers = 0;
- gf_term_on_command(ffd->service, &map, GF_OK);
- }
- else if (slh.compositionTimeStamp < seek_video) {
+#if 0
+ if (slh.compositionTimeStamp < seek_video) {
slh.decodingTimeStamp = slh.compositionTimeStamp = seek_video;
}
- gf_term_on_sl_packet(ffd->service, ffd->video_ch, pkt.data, pkt.size, &slh, GF_OK);
- video_init = 1;
+#endif
+ gf_term_on_sl_packet(ffd->service, ffd->video_ch, (char *) pkt.data, pkt.size, &slh, GF_OK);
}
gf_mx_v(ffd->mx);
av_free_packet(&pkt);
- /*here's the trick - only seek after sending the first packets of each stream - this allows ffmpeg video decoders
- to resync properly*/
- if (do_seek && video_init && ffd->seekable) {
- av_seek_frame(ffd->ctx, -1, seek_to, AVSEEK_FLAG_BACKWARD);
- do_seek = 0;
- map_audio_time = ffd->unreliable_audio_timing;
- }
/*sleep untill the buffer occupancy is too low - note that this work because all streams in this
demuxer are synchronized*/
- while (1) {
- if (ffd->audio_ch) {
- com.base.on_channel = ffd->audio_ch;
- gf_term_on_command(ffd->service, &com, GF_OK);
- if (com.buffer.occupancy < ffd->data_buffer_ms) break;
- }
- if (ffd->video_ch) {
- com.base.on_channel = ffd->video_ch;
- gf_term_on_command(ffd->service, &com, GF_OK);
- if (com.buffer.occupancy < ffd->data_buffer_ms) break;
- }
+ while (ffd->audio_run || ffd->video_run) {
+ gf_term_on_command(ffd->service, &com, GF_OK);
+ if (com.buffer.occupancy < com.buffer.max)
+ break;
gf_sleep(10);
- /*escape if disconnect*/
- if (!ffd->audio_run && !ffd->video_run) break;
}
if (!ffd->audio_run && !ffd->video_run) break;
}
gf_bs_write_u32(bs, dec->bit_rate);
gf_bs_write_u32(bs, dec->codec_tag);
if (dec->extradata_size) {
- gf_bs_write_data(bs, dec->extradata, dec->extradata_size);
+ gf_bs_write_data(bs, (char *) dec->extradata, dec->extradata_size);
}
gf_bs_get_content(bs, (char **) &esd->decoderConfig->decoderSpecificInfo->data, &esd->decoderConfig->decoderSpecificInfo->dataLength);
gf_bs_del(bs);
gf_bs_write_u32(bs, dec->pix_fmt);
if (dec->extradata_size) {
- gf_bs_write_data(bs, dec->extradata, dec->extradata_size);
+ gf_bs_write_data(bs, (char *) dec->extradata, dec->extradata_size);
}
gf_bs_get_content(bs, (char **) &esd->decoderConfig->decoderSpecificInfo->data, &esd->decoderConfig->decoderSpecificInfo->dataLength);
gf_bs_del(bs);
} else {
pd.filename = szName;
pd.buf_size = ffd->buffer_used;
- pd.buf = ffd->buffer;
+ pd.buf = (u8 *) ffd->buffer;
av_in = av_probe_input_format(&pd, 1);
if (!av_in) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[FFMPEG] error probing file %s - probe start with %c %c %c %c\n", url, ffd->buffer[0], ffd->buffer[1], ffd->buffer[2], ffd->buffer[3]));
case GF_NET_CHAN_INTERACTIVE:
return ffd->seekable ? GF_OK : GF_NOT_SUPPORTED;
case GF_NET_CHAN_BUFFER:
- com->buffer.max = com->buffer.min = 0;
return GF_OK;
case GF_NET_CHAN_DURATION:
if (ffd->ctx->duration == AV_NOPTS_VALUE)
Bool direct_output;
u32 stride;
+ u32 output_cb_size;
/*for audio packed frames*/
u32 frame_start;
char audio_buf[192000];
#endif
#ifdef USE_AVCTX3
- AVFrame *audio_frame;
+ AVFrame *audio_frame;
#endif
+
+
+ Bool output_as_8bit;
+ u32 display_bpp;
+ Bool conv_to_8bit;
+ char *conv_buffer;
} FFDec;
void *FFDEC_Load();
{
GF_LOG(GF_LOG_DEBUG, GF_LOG_PARSER, ("[FreeType] Scanning directory %s (%s)\n", file_name, file_path));
gf_enum_directory(file_path, 0, ft_enum_fonts, cbck, "ttf;ttc");
- return gf_enum_directory(file_path, 1, ft_enum_fonts_dir, cbck, NULL);
+ return (gf_enum_directory(file_path, 1, ft_enum_fonts_dir, cbck, NULL)==GF_OK) ? GF_FALSE : GF_TRUE;
}
GF_Event evt;
dr->max_screen_width = gctx->screen_w = width;
dr->max_screen_height = gctx->screen_h = height;
+ dr->max_screen_bpp = 8;//we don't filter for bpp less than 8
evt.type = GF_EVENT_RESOLUTION;
evt.size.width = dr->max_screen_width;
*vp = INT_TO_JSVAL( (term->compositor->video_out->hw_caps & GF_VIDEO_HW_HAS_RGB) ? 1 : 0 );
}
else if (!strcmp(prop_name, "hardware_rgba")) {
- *vp = INT_TO_JSVAL( (term->compositor->video_out->hw_caps & GF_VIDEO_HW_HAS_RGBA) ? 1 : 0 );
+ u32 has_rgba = (term->compositor->video_out->hw_caps & GF_VIDEO_HW_HAS_RGBA) ? 1 : 0;
+#ifndef GPAC_DISABLE_3D
+ if (term->compositor->hybrid_opengl || term->compositor->is_opengl) has_rgba = 1;
+#endif
+ *vp = INT_TO_JSVAL( has_rgba );
}
else if (!strcmp(prop_name, "hardware_stretch")) {
*vp = INT_TO_JSVAL( (term->compositor->video_out->hw_caps & GF_VIDEO_HW_HAS_STRETCH) ? 1 : 0 );
}
+ else if (!strcmp(prop_name, "screen_width")) {
+ *vp = INT_TO_JSVAL( term->compositor->video_out->max_screen_width);
+ }
+ else if (!strcmp(prop_name, "screen_height")) {
+ *vp = INT_TO_JSVAL( term->compositor->video_out->max_screen_height);
+ }
else if (!strcmp(prop_name, "http_bitrate")) {
*vp = INT_TO_JSVAL( gf_dm_get_data_rate(term->downloader)*8/1024);
}
static GF_Err BMP_ProcessData(GF_MediaDecoder *ifcg,
char *inBuffer, u32 inBufferLength,
- u16 ES_ID,
+ u16 ES_ID, u32 *CTS,
char *outBuffer, u32 *outBufferLength,
u8 PaddingBits, u32 mmlevel)
{
static GF_Err JP2_ProcessData(GF_MediaDecoder *ifcg,
char *inBuffer, u32 inBufferLength,
- u16 ES_ID,
+ u16 ES_ID, u32 *CTS,
char *outBuffer, u32 *outBufferLength,
u8 PaddingBits, u32 mmlevel)
{
static GF_Err JPEG_ProcessData(GF_MediaDecoder *ifcg,
char *inBuffer, u32 inBufferLength,
- u16 ES_ID,
+ u16 ES_ID, u32 *CTS,
char *outBuffer, u32 *outBufferLength,
u8 PaddingBits, u32 mmlevel)
{
static GF_Err PNG_ProcessData(GF_MediaDecoder *ifcg,
char *inBuffer, u32 inBufferLength,
- u16 ES_ID,
+ u16 ES_ID, u32 *CTS,
char *outBuffer, u32 *outBufferLength,
u8 PaddingBits, u32 mmlevel)
{
s32 has_pending_segments;
Bool clock_discontinuity;
+ Bool disconnected;
} ISOMReader;
Bool buffering;
u32 buffer_min, buffer_max;
+
+
+ u32 nalu_extract_mode;
} ISOMChannel;
void isor_reset_reader(ISOMChannel *ch);
void isor_reader_get_sample(ISOMChannel *ch);
if (esd) {
gf_isom_get_reference(read->mov, i+1, GF_ISOM_REF_BASE, 1, &base_track);
esd->has_ref_base = base_track ? GF_TRUE : GF_FALSE;
- /*FIXME: if we declare only SPS/PPS of the highest layer, we have a problem in decoding even though we have all SPS/PPS inband (OpenSVC bug ?)*/
- /*so we add by default the SPS/PPS of the lower layers to this esd*/
- if (esd->has_ref_base && add_ps_lower) {
- u32 count, refIndex, ref_track, num_sps, num_pps, t;
- GF_AVCConfig *cfg = gf_odf_avc_cfg_read(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength);
- GF_AVCConfig *avccfg, *svccfg;
-
- count = gf_isom_get_reference_count(read->mov, i+1, GF_ISOM_REF_SCAL);
- for (refIndex = count; refIndex != 0; refIndex--) {
- gf_isom_get_reference(read->mov, i+1, GF_ISOM_REF_SCAL, refIndex, &ref_track);
- avccfg = gf_isom_avc_config_get(read->mov, ref_track, 1);
- svccfg = gf_isom_svc_config_get(read->mov, ref_track, 1);
- if (avccfg) {
- num_sps = gf_list_count(avccfg->sequenceParameterSets);
- for (t = 0; t < num_sps; t++) {
- GF_AVCConfigSlot *slc = gf_list_get(avccfg->sequenceParameterSets, t);
- GF_AVCConfigSlot *sl = (GF_AVCConfigSlot*)gf_malloc(sizeof(GF_AVCConfigSlot));
- sl->id = slc->id;
- sl->size = slc->size;
- sl->data = (char*)gf_malloc(sizeof(char)*sl->size);
- memcpy(sl->data, slc->data, sizeof(char)*sl->size);
- gf_list_insert(cfg->sequenceParameterSets, sl, 0);
- }
- num_pps = gf_list_count(avccfg->pictureParameterSets);
- for (t = 0; t < num_sps; t++) {
- GF_AVCConfigSlot *slc = gf_list_get(avccfg->pictureParameterSets, t);
- GF_AVCConfigSlot *sl = (GF_AVCConfigSlot*)gf_malloc(sizeof(GF_AVCConfigSlot));
- sl->id = slc->id;
- sl->size = slc->size;
- sl->data = (char*)gf_malloc(sizeof(char)*sl->size);
- memcpy(sl->data, slc->data, sizeof(char)*sl->size);
- gf_list_insert(cfg->pictureParameterSets, sl, 0);
- }
- gf_odf_avc_cfg_del(avccfg);
- }
- if (svccfg) {
- num_sps = gf_list_count(svccfg->sequenceParameterSets);
- for (t = 0; t < num_sps; t++) {
- GF_AVCConfigSlot *slc = gf_list_get(svccfg->sequenceParameterSets, t);
- GF_AVCConfigSlot *sl = (GF_AVCConfigSlot*)gf_malloc(sizeof(GF_AVCConfigSlot));
- sl->id = slc->id;
- sl->size = slc->size;
- sl->data = (char*)gf_malloc(sizeof(char)*sl->size);
- memcpy(sl->data, slc->data, sizeof(char)*sl->size);
- gf_list_insert(cfg->sequenceParameterSets, sl, 0);
- }
- num_pps = gf_list_count(svccfg->pictureParameterSets);
- for (t = 0; t < num_pps; t++) {
- GF_AVCConfigSlot *slc = gf_list_get(svccfg->pictureParameterSets, t);
- GF_AVCConfigSlot *sl = (GF_AVCConfigSlot*)gf_malloc(sizeof(GF_AVCConfigSlot));
- sl->id = slc->id;
- sl->size = slc->size;
- sl->data = (char*)gf_malloc(sizeof(char)*sl->size);
- memcpy(sl->data, slc->data, sizeof(char)*sl->size);
- gf_list_insert(cfg->pictureParameterSets, sl, 0);
- }
- gf_odf_avc_cfg_del(svccfg);
- }
- }
-
- if (esd->decoderConfig->decoderSpecificInfo->data) gf_free(esd->decoderConfig->decoderSpecificInfo->data);
- gf_odf_avc_cfg_write(cfg, &esd->decoderConfig->decoderSpecificInfo->data, &esd->decoderConfig->decoderSpecificInfo->dataLength);
- gf_odf_avc_cfg_del(cfg);
- }
od = (GF_ObjectDescriptor *) gf_odf_desc_new(GF_ODF_OD_TAG);
od->service_ifce = read->input;
read = (ISOMReader *) plug->priv;
reply = GF_OK;
+ read->disconnected = GF_TRUE;
if (read->mov) gf_isom_close(read->mov);
read->mov = NULL;
gf_isom_get_reference(ch->owner->mov, ch->track, GF_ISOM_REF_BASE, 1, &ch->base_track);
ch->next_track = 0;
/*in scalable mode add SPS/PPS in-band*/
- gf_isom_set_nalu_extract_mode(ch->owner->mov, ch->track, GF_ISOM_NALU_EXTRACT_INBAND_PS_FLAG);
+ ch->nalu_extract_mode = GF_ISOM_NALU_EXTRACT_INBAND_PS_FLAG /*| GF_ISOM_NALU_EXTRACT_ANNEXB_FLAG*/;
+ gf_isom_set_nalu_extract_mode(ch->owner->mov, ch->track, ch->nalu_extract_mode);
break;
}
}
/*in scalable mode add SPS/PPS in-band*/
- gf_isom_set_nalu_extract_mode(the_file, next_track, GF_ISOM_NALU_EXTRACT_INBAND_PS_FLAG);
+ gf_isom_set_nalu_extract_mode(the_file, next_track, ch->nalu_extract_mode);
return next_track;
}
if (!plug || !plug->priv || !com) return GF_SERVICE_ERROR;
read = (ISOMReader *) plug->priv;
+ if (read->disconnected) return GF_OK;
if (com->command_type==GF_NET_SERVICE_INFO) {
u32 tag_len;
}
return GF_OK;
}
- if (com->command_type == GF_NET_SERVICE_PROXY_CHUNK_RECEIVE) {
- isor_flush_data(read, 1, 1);
- return GF_OK;
- }
- if (com->command_type == GF_NET_SERVICE_PROXY_SEGMENT_RECEIVE) {
- isor_flush_data(read, 1, 0);
+ if (com->command_type == GF_NET_SERVICE_PROXY_DATA_RECEIVE) {
+ isor_flush_data(read, 1, com->proxy_data.is_chunk);
return GF_OK;
}
if (com->command_type == GF_NET_SERVICE_FLUSH_DATA) {
gf_odf_desc_del((GF_Descriptor *) dcd);
}
return GF_OK;
+ }
+ case GF_NET_CHAN_NALU_MODE:
+ ch->nalu_extract_mode = GF_ISOM_NALU_EXTRACT_INBAND_PS_FLAG;
+ //when this is set, we work in real scalable (e.g. N streams reassembled by the player) so only extract the layer. This will need refinements if we plan to support
+ //several scalable layers ...
+ if (com->nalu_mode.extract_mode==1) ch->nalu_extract_mode |= GF_ISOM_NALU_EXTRACT_ANNEXB_FLAG | GF_ISOM_NALU_EXTRACT_VDRD_FLAG | GF_ISOM_NALU_EXTRACT_LAYER_ONLY;
+ gf_isom_set_nalu_extract_mode(ch->owner->mov, ch->track, ch->nalu_extract_mode);
+ break;
default:
break;
}
- }
return GF_NOT_SUPPORTED;
}
}
}
/*rewrite all upcoming SPS/PPS into the samples*/
- gf_isom_set_nalu_extract_mode(read->mov, ch->track, GF_ISOM_NALU_EXTRACT_INBAND_PS_FLAG);
+ gf_isom_set_nalu_extract_mode(read->mov, ch->track, ch->nalu_extract_mode);
ch->last_state = GF_OK;
ch->sample_num++;
fetch_next:
ch->sample = gf_isom_get_sample(ch->owner->mov, ch->track, ch->sample_num, &ivar);
-
/*if sync shadow / carousel RAP skip*/
if (ch->sample && (ch->sample->IsRAP==2)) {
gf_isom_sample_del(&ch->sample);
ch->last_state = GF_OK;
ch->current_slh.accessUnitEndFlag = ch->current_slh.accessUnitStartFlag = 1;
ch->current_slh.accessUnitLength = ch->sample->dataLength;
+ ch->current_slh.au_duration = gf_isom_get_sample_duration(ch->owner->mov, ch->track, ch->sample_num);
/*still seeking or not ?*/
if (ch->start <= ch->sample->DTS + ch->sample->CTS_Offset) {
ch->current_slh.decodingTimeStamp = ch->sample->DTS;
}
}
if (buffer_full) {
- read->in_data_flush = 0;
read->has_pending_segments++;
+ read->in_data_flush = 0;
gf_mx_v(read->segment_mutex);
if (count) {
GF_LOG(GF_LOG_INFO, GF_LOG_DASH, ("[IsoMedia] Buffer level %d ms higher than max allowed %d ms - skipping dispatch\n", com.buffer.occupancy, com.buffer.max));
static GF_Err MAD_ProcessData(GF_MediaDecoder *ifcg,
char *inBuffer, u32 inBufferLength,
- u16 ES_ID,
+ u16 ES_ID, u32 *CTS,
char *outBuffer, u32 *outBufferLength,
u8 PaddingBits, u32 mmlevel)
{
#include <gpac/dash.h>
#include <gpac/internal/terminal_dev.h>
+typedef enum
+{
+ MPDIN_BUFFER_NONE=0,
+ MPDIN_BUFFER_MIN=1,
+ MPDIN_BUFFER_SEGMENTS=2
+} MpdInBuffer;
+
typedef struct __mpd_module
{
/* GPAC Service object (i.e. how this module is seen by the terminal)*/
Bool connection_ack_sent;
Bool in_seek;
Bool memory_storage;
- Bool use_max_res, immediate_switch, allow_http_abort, enable_buffering;
+ Bool use_max_res, immediate_switch, allow_http_abort;
u32 use_low_latency;
+ MpdInBuffer buffer_mode;
Double previous_start_range;
/*max width & height in all active representations*/
u32 width, height;
{
GF_NetworkCommand com;
memset(&com, 0, sizeof(GF_NetworkCommand));
- com.base.command_type = chunk_flush ? GF_NET_SERVICE_PROXY_CHUNK_RECEIVE : GF_NET_SERVICE_PROXY_SEGMENT_RECEIVE;
+ com.proxy_data.command_type = GF_NET_SERVICE_PROXY_DATA_RECEIVE;
+ com.proxy_data.is_chunk = chunk_flush;
+ com.proxy_data.is_live = gf_dash_is_dynamic_mpd(group->mpdin->dash);
group->segment_ifce->ServiceCommand(group->segment_ifce, &com);
}
for (i=0; i<gf_dash_get_group_count(mpdin->dash); i++) {
GF_MPDGroup *group;
- if (!gf_dash_is_group_selected(mpdin->dash, i)) continue;
+ if (!gf_dash_is_group_selectable(mpdin->dash, i)) continue;
group = gf_dash_get_group_udta(mpdin->dash, i);
if (group->segment_ifce == ifce) {
gf_dash_group_get_segment_init_url(mpdin->dash, i, ¶m->url_query.start_range, ¶m->url_query.end_range);
+ param->url_query.current_download = 0;
return GF_OK;
}
}
{
GF_Channel *ch;
if (!channel) {
- if (gf_dash_is_group_selected(mpdin->dash, 0)) {
- GF_MPDGroup *mudta = gf_dash_get_group_udta(mpdin->dash, 0);
- return mudta ? mudta->segment_ifce : NULL;
+ u32 i;
+ for (i=0; i<gf_dash_get_group_count(mpdin->dash); i++) {
+ if (gf_dash_is_group_selectable(mpdin->dash, i)) {
+ GF_MPDGroup *mudta = gf_dash_get_group_udta(mpdin->dash, i);
+ if (mudta && mudta->segment_ifce) return mudta->segment_ifce;
+ }
}
return NULL;
}
if (param->msg_type == GF_NETIO_DATA_TRANSFERED) {
u32 bytes_per_sec;
const char *url;
- u64 start_time = gf_dm_sess_get_utc_start(group->sess);
gf_dm_sess_get_stats(group->sess, NULL, &url, NULL, NULL, &bytes_per_sec, NULL);
GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MPD_IN] End of file %s download at UTC "LLU" ms - estimated bandwidth %d kbps - started file or last chunk at UTC "LLU"\n", url, gf_net_get_utc(), 8*bytes_per_sec/1000, gf_dm_sess_get_utc_start(group->sess)));
}
}
if (dash_evt==GF_DASH_EVENT_SELECT_GROUPS) {
- const char *opt;
- for (i=0; i<gf_dash_get_group_count(mpdin->dash); i++) {
- /*todo: select groups based on user criteria*/
- gf_dash_group_select(mpdin->dash, i, 1);
- }
- opt = gf_modules_get_option((GF_BaseInterface *)mpdin->plug, "Systems", "Language3CC");
- if (opt && strcmp(opt, "und"))
- gf_dash_groups_set_language(mpdin->dash, opt);
+ //configure buffer in dynamic mode without low latency: we indicate how much the player will buffer
+ if (gf_dash_is_dynamic_mpd(mpdin->dash) && !mpdin->use_low_latency) {
+ u32 buffer_ms = 0;
+ const char *opt = gf_modules_get_option((GF_BaseInterface *)mpdin->plug, "Network", "BufferLength");
+ if (opt) buffer_ms = atoi(opt);
+
+ //use min buffer from MPD
+ if (mpdin->buffer_mode>=MPDIN_BUFFER_MIN) {
+ u32 mpd_buffer_ms = gf_dash_get_min_buffer_time(mpdin->dash);
+ if (mpd_buffer_ms > buffer_ms)
+ buffer_ms = mpd_buffer_ms;
+ }
+ if (buffer_ms) {
+ gf_dash_set_user_buffer(mpdin->dash, buffer_ms);
+ }
+ }
+ //let the player decide which group to play: we declare everything
return GF_OK;
}
/*select input services if possible*/
for (i=0; i<gf_dash_get_group_count(mpdin->dash); i++) {
const char *mime, *init_segment;
- if (!gf_dash_is_group_selected(mpdin->dash, i))
+ //let the player decide which group to play
+ if (!gf_dash_is_group_selectable(mpdin->dash, i))
continue;
mime = gf_dash_group_get_segment_mime(mpdin->dash, i);
GF_Err MPD_ConnectService(GF_InputService *plug, GF_ClientService *serv, const char *url)
{
- GF_MPD_In *mpdin = (GF_MPD_In*) plug->priv;
- const char *opt;
- GF_Err e;
- s32 shift_utc_ms;
+ GF_MPD_In *mpdin = (GF_MPD_In*) plug->priv;
+ const char *opt;
+ GF_Err e;
+ s32 shift_utc_ms, debug_adaptation_set;
u32 max_cache_duration, auto_switch_count, init_timeshift;
Bool use_server_utc;
GF_DASHInitialSelectionMode first_select_mode;
Bool keep_files, disable_switching;
- GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MPD_IN] Received Service Connection request (%p) from terminal for %s\n", serv, url));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MPD_IN] Received Service Connection request (%p) from terminal for %s\n", serv, url));
- if (!mpdin|| !serv || !url) return GF_BAD_PARAM;
+ if (!mpdin || !serv || !url)
+ return GF_BAD_PARAM;
- mpdin->service = serv;
+ mpdin->service = serv;
mpdin->dash_io.udta = mpdin;
mpdin->dash_io.delete_cache_file = mpdin_dash_io_delete_cache_file;
mpdin->memory_storage = (opt && !strcmp(opt, "yes")) ? 1 : 0;
opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "UseMaxResolution");
- if (!opt) gf_modules_set_option((GF_BaseInterface *)plug, "DASH", "UseMaxResolution", "yes");
- mpdin->use_max_res = (!opt || !strcmp(opt, "yes")) ? 1 : 0;
+ if (!opt) {
+#if defined(_WIN32_WCE) || defined(GPAC_ANDROID) || defined(GPAC_IPHONE)
+ opt = "yes";
+#else
+ opt = "no";
+#endif
+ gf_modules_set_option((GF_BaseInterface *)plug, "DASH", "UseMaxResolution", opt);
+ }
+ mpdin->use_max_res = !strcmp(opt, "yes") ? 1 : 0;
opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "ImmediateSwitching");
if (!opt) gf_modules_set_option((GF_BaseInterface *)plug, "DASH", "ImmediateSwitching", "no");
mpdin->immediate_switch = (opt && !strcmp(opt, "yes")) ? 1 : 0;
- opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "EnableBuffering");
- if (!opt) gf_modules_set_option((GF_BaseInterface *)plug, "DASH", "EnableBuffering", "no");
- mpdin->enable_buffering = (opt && !strcmp(opt, "yes")) ? 1 : 0;
+ opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "BufferingMode");
+ if (!opt) gf_modules_set_option((GF_BaseInterface *)plug, "DASH", "BufferingMode", "minBuffer");
+
+ if (opt && !strcmp(opt, "segments")) mpdin->buffer_mode = MPDIN_BUFFER_SEGMENTS;
+ else if (opt && !strcmp(opt, "none")) mpdin->buffer_mode = MPDIN_BUFFER_NONE;
+ else mpdin->buffer_mode = MPDIN_BUFFER_MIN;
+
opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "LowLatency");
if (!opt) gf_modules_set_option((GF_BaseInterface *)plug, "DASH", "LowLatency", "no");
opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "UseServerUTC");
if (!opt) gf_modules_set_option((GF_BaseInterface *)plug, "DASH", "UseServerUTC", "yes");
use_server_utc = (opt && !strcmp(opt, "yes")) ? 1 : 0;
-
mpdin->in_seek = 0;
mpdin->previous_start_range = -1;
opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "InitialTimeshift");
if (!opt) gf_modules_set_option((GF_BaseInterface *)plug, "DASH", "InitialTimeshift", "0");
if (opt) init_timeshift = atoi(opt);
-
- mpdin->dash = gf_dash_new(&mpdin->dash_io, max_cache_duration, auto_switch_count, keep_files, disable_switching, first_select_mode, mpdin->enable_buffering, init_timeshift);
+
+ mpdin->dash = gf_dash_new(&mpdin->dash_io, max_cache_duration, auto_switch_count, keep_files, disable_switching, first_select_mode, (mpdin->buffer_mode == MPDIN_BUFFER_SEGMENTS) ? 1 : 0, init_timeshift);
if (!mpdin->dash) {
GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("[MPD_IN] Error - cannot create DASH Client for %s\n", url));
gf_dash_set_utc_shift(mpdin->dash, shift_utc_ms);
gf_dash_enable_utc_drift_compensation(mpdin->dash, use_server_utc);
-
opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "UseScreenResolution");
- if (!opt) gf_modules_set_option((GF_BaseInterface *)plug, "DASH", "UseScreenResolution", "yes");
+ //default mode is no for the time being
+ if (!opt) gf_modules_set_option((GF_BaseInterface *)plug, "DASH", "UseScreenResolution", "no");
if (!opt || !strcmp(opt, "yes")) {
GF_NetworkCommand com;
memset(&com, 0, sizeof(GF_NetworkCommand));
com.base.command_type = GF_NET_SERVICE_MEDIA_CAP_QUERY;
gf_term_on_command(serv, &com, GF_OK);
+ com.mcaps.width = 1920;
+ com.mcaps.height = 1080;
if (com.mcaps.width && com.mcaps.height) {
- gf_dash_set_max_resolution(mpdin->dash, com.mcaps.width, com.mcaps.height);
+ gf_dash_set_max_resolution(mpdin->dash, com.mcaps.width, com.mcaps.height, com.mcaps.display_bit_depth);
}
}
gf_dash_set_segment_expiration_threshold(mpdin->dash, atoi(opt));
}
+
+ opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "DebugAdaptationSet");
+ if (!opt) gf_modules_set_option((GF_BaseInterface *)plug, "DASH", "DebugAdaptationSet", "-1");
+ debug_adaptation_set = opt ? atoi(opt) : -1;
+
+ gf_dash_debug_group(mpdin->dash, debug_adaptation_set);
+
/*dash thread starts at the end of gf_dash_open */
e = gf_dash_open(mpdin->dash, url);
if (e) {
for (i=0; i<gf_dash_get_group_count(mpdin->dash); i++) {
GF_Descriptor *desc;
GF_MPDGroup *mudta;
+#if 0
if (!gf_dash_is_group_selected(mpdin->dash, i))
continue;
+#endif
mudta = gf_dash_get_group_udta(mpdin->dash, i);
if (!mudta) continue;
if (mudta->service_descriptor_fetched) continue;
case GF_NET_SERVICE_QUALITY_SWITCH:
gf_dash_switch_quality(mpdin->dash, com->switch_quality.up, mpdin->immediate_switch);
return GF_OK;
+
+ default:
+ break;
}
/*not supported*/
if (!com->base.on_channel) return GF_NOT_SUPPORTED;
/* we are interactive (that's the whole point of MPD) */
return GF_OK;
- /*we should get it from MPD minBufferTime*/
case GF_NET_CHAN_BUFFER:
- if (mpdin->enable_buffering) {
- com->buffer.max = gf_dash_get_min_buffer_time(mpdin->dash);
+ /*get it from MPD minBufferTime - if not in low latency mode, indicate the value given in MPD (not possible to fetch segments earlier) - to be more precise we should get the min segment duration for this group*/
+ if (!mpdin->use_low_latency && (mpdin->buffer_mode>=MPDIN_BUFFER_MIN) ) {
+ u32 max = gf_dash_get_min_buffer_time(mpdin->dash);
+ if (max>com->buffer.max)
+ com->buffer.max = max;
+
if (! gf_dash_is_dynamic_mpd(mpdin->dash)) {
- com->buffer.min = 200;
+ com->buffer.min = 1;
}
}
return GF_OK;
idx = MPD_GetGroupIndexForChannel(mpdin, com->play.on_channel);
if (idx>=0) {
+ gf_dash_group_select(mpdin->dash, idx, GF_TRUE);
gf_dash_set_group_done(mpdin->dash, idx, 0);
com->play.dash_segment_switch = gf_dash_group_segment_switch_forced(mpdin->dash, idx);
}
/*don't forward commands, we are killing the service anyway ...*/
if (gf_dash_get_period_switch_status(mpdin->dash) ) return GF_OK;
} else {
- s32 idx = MPD_GetGroupIndexForChannel(mpdin, com->play.on_channel);
+ idx = MPD_GetGroupIndexForChannel(mpdin, com->play.on_channel);
if (idx>=0)
+ gf_dash_group_select(mpdin->dash, idx, GF_TRUE);
com->play.start_range = gf_dash_group_get_start_range(mpdin->dash, idx);
}
*/
GF_MPD_In *mpdin = (GF_MPD_In*) plug->priv;
GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MPD_IN] Received Can Handle URL In Service (%p) request from terminal for %s\n", mpdin->service, url));
- if (!plug || !plug->priv) return GF_SERVICE_ERROR;
+ if (!plug || !plug->priv) return GF_FALSE;
if (gf_dash_get_url(mpdin->dash) && !strcmp(gf_dash_get_url(mpdin->dash) , url)) {
return 1;
} else {
static const char * MIMES[] = { "video/mpeg-2", "video/mp2t", "video/mpeg", NULL};
-#define M2TS_BUFFER_MAX 200
+//when regulating data rate from file using PCR, this is the maximum sleep we tolerate
+#define M2TS_MAX_SLEEP 200
typedef struct {
char *fragment;
Bool skip_regulation;
Bool has_pending_segments;
+ Bool in_data_flush;
+
Bool hybrid_on;
}M2TSIn;
#endif
slh.compositionTimeStampFlag = 1;
slh.compositionTimeStamp = pck->PTS;
- if (pck->DTS) {
+ if (pck->DTS != pck->PTS) {
slh.decodingTimeStampFlag = 1;
slh.decodingTimeStamp = pck->DTS;
}
diff = (u32) pcr_diff - (stb - ts->stb_at_last_pcr);
}
}
- if (diff<-100) {
+ if (diff<-400) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[M2TS In] Demux not going fast enough according to PCR (drift %d, pcr: "LLU", last pcr: "LLU")\n", diff, pcr, ts->pcr_last));
} else if (diff>0) {
u32 sleep_for=1;
}
/*We don't sleep for the entire buffer occupancy, because we would take
the risk of starving the audio chains. We try to keep buffers half full*/
- sleep_for = MIN(com.buffer.occupancy/2, M2TS_BUFFER_MAX);
+ sleep_for = MIN(com.buffer.occupancy/2, M2TS_MAX_SLEEP);
#ifndef GPAC_DISABLE_LOG
if (!nb_sleep) {
}
}
break;
+
+ case GF_M2TS_EVT_TEMI_LOCATION:
+ {
+ GF_NetworkCommand com;
+ memset(&com, 0, sizeof(com));
+ com.addon_info.command_type = GF_NET_ASSOCIATED_CONTENT_LOCATION;
+ com.addon_info.external_URL = ((GF_M2TS_TemiLocationDescriptor*)param)->external_URL;
+ com.addon_info.is_announce = ((GF_M2TS_TemiLocationDescriptor*)param)->is_announce;
+ com.addon_info.is_splicing = ((GF_M2TS_TemiLocationDescriptor*)param)->is_splicing;
+ com.addon_info.activation_countdown = ((GF_M2TS_TemiLocationDescriptor*)param)->activation_countdown;
+ com.addon_info.reload_external = ((GF_M2TS_TemiLocationDescriptor*)param)->reload_external;
+ com.addon_info.timeline_id = ((GF_M2TS_TemiLocationDescriptor*)param)->timeline_id;
+ gf_term_on_command(m2ts->service, &com, GF_OK);
+ }
+ break;
+ case GF_M2TS_EVT_TEMI_TIMECODE:
+ {
+ GF_NetworkCommand com;
+ memset(&com, 0, sizeof(com));
+ com.addon_time.command_type = GF_NET_ASSOCIATED_CONTENT_TIMING;
+ com.addon_time.timeline_id = ((GF_M2TS_TemiTimecodeDescriptor*)param)->timeline_id;
+ com.addon_time.media_pts = ((GF_M2TS_TemiTimecodeDescriptor*)param)->pes_pts;
+ com.addon_time.media_timescale = ((GF_M2TS_TemiTimecodeDescriptor*)param)->media_timescale;
+ com.addon_time.media_timestamp = ((GF_M2TS_TemiTimecodeDescriptor*)param)->media_timestamp;
+ com.addon_time.force_reload = ((GF_M2TS_TemiTimecodeDescriptor*)param)->force_reload;
+ com.addon_time.is_paused = ((GF_M2TS_TemiTimecodeDescriptor*)param)->is_paused;
+ gf_term_on_command(m2ts->service, &com, GF_OK);
+ }
+ break;
}
}
return query_ret;
}
+enum
+{
+ GF_M2TS_PUSH_SEGMENT,
+ GF_M2TS_PUSH_CHUNK,
+ GF_M2TS_FLUSH_DATA
+};
+
void m2ts_flush_data(M2TSIn *m2ts, u32 flush_type)
{
u64 start_byterange, end_byterange;
u32 refresh_type = 0;
const char *url;
+ if (m2ts->in_data_flush) {
+ if (flush_type==GF_M2TS_PUSH_SEGMENT)
+ m2ts->has_pending_segments++;
+ return;
+ }
gf_mx_p(m2ts->mx);
+ m2ts->in_data_flush = 1;
//check buffer level when start of new segment
- if (flush_type<=1) {
+ if (flush_type<=GF_M2TS_PUSH_CHUNK) {
GF_NetworkCommand com;
/*query buffer level on each channel, don't sleep if too low*/
memset(&com, 0, sizeof(GF_NetworkCommand));
gf_term_on_command(m2ts->service, &com, GF_OK);
if (com.buffer.occupancy && (com.buffer.occupancy >= com.buffer.max)) {
//count completed segment that were not dispatched
- if (flush_type==1)
+ if (flush_type==GF_M2TS_PUSH_SEGMENT)
m2ts->has_pending_segments++;
+ m2ts->in_data_flush = 0;
gf_mx_v(m2ts->mx);
return;
}
}
- else if (flush_type==2) {
+ else if (0 && flush_type==GF_M2TS_FLUSH_DATA) {
if (! m2ts->has_pending_segments) {
+ m2ts->in_data_flush = 0;
gf_mx_v(m2ts->mx);
return;
}
}
- e = M2TS_QueryNextFile(m2ts, (flush_type==2) ? 2 : 1, &url, &start_byterange, &end_byterange, &refresh_type);
+ e = M2TS_QueryNextFile(m2ts, (flush_type==GF_M2TS_FLUSH_DATA) ? 2 : 1, &url, &start_byterange, &end_byterange, &refresh_type);
if (e) {
+ m2ts->in_data_flush = 0;
gf_mx_v(m2ts->mx);
return;
}
}
}
+ m2ts->in_data_flush = 0;
gf_mx_v(m2ts->mx);
}
-
static GF_Err M2TS_ConnectService(GF_InputService *plug, GF_ClientService *serv, const char *url)
{
GF_Err e;
//get byte range if any (local playback)
if (url) {
u64 start_byterange, end_byterange;
+ gf_mx_p(m2ts->mx);
+ m2ts->in_data_flush = 1;
M2TS_QueryNextFile(m2ts, 0, NULL, &start_byterange, &end_byterange, NULL);
e = gf_m2ts_demux_file(m2ts->ts, url, start_byterange, end_byterange, 0, 0);
+ M2TS_QueryNextFile(m2ts, 3, NULL, NULL, NULL, NULL);
+ m2ts->in_data_flush = 0;
+ gf_mx_v(m2ts->mx);
} else {
e = GF_OK;
}
}
return GF_OK;
}
- if (com->command_type == GF_NET_SERVICE_PROXY_CHUNK_RECEIVE) {
- m2ts_flush_data(m2ts, 1);
- return GF_OK;
- }
- if (com->command_type == GF_NET_SERVICE_PROXY_SEGMENT_RECEIVE) {
- m2ts_flush_data(m2ts, 0);
+ if (com->command_type == GF_NET_SERVICE_PROXY_DATA_RECEIVE) {
+ m2ts_flush_data(m2ts, com->proxy_data.is_chunk ? GF_M2TS_PUSH_CHUNK : GF_M2TS_PUSH_SEGMENT);
return GF_OK;
}
if (com->command_type == GF_NET_SERVICE_FLUSH_DATA) {
if (plug->query_proxy)
- m2ts_flush_data(m2ts, 2);
+ m2ts_flush_data(m2ts, GF_M2TS_FLUSH_DATA);
return GF_OK;
}
return GF_NOT_SUPPORTED;
/*we cannot seek stream by stream*/
case GF_NET_CHAN_INTERACTIVE:
+ if (m2ts->ts->file) return GF_OK;
return GF_NOT_SUPPORTED;
case GF_NET_CHAN_BUFFER:
+ //do not override config
if (ts->dnload || plug->query_proxy) {
if (!com->buffer.max) com->buffer.max = 1000;
- com->buffer.min = com->buffer.max;
- } else if (ts->file) {
- com->buffer.max = M2TS_BUFFER_MAX;
}
- if (m2ts->low_latency_mode)
- com->buffer.max = M2TS_BUFFER_MAX;
return GF_OK;
case GF_NET_CHAN_DURATION:
com->duration.duration = ts->duration;
return GF_NOT_SUPPORTED;
case GF_NET_SERVICE_FLUSH_DATA:
return GF_NOT_SUPPORTED;
+ default:
+ break;
}
if (!com->base.on_channel) {
memset(info, 0, sizeof(OGGInfo));
/*vorbis*/
- if ((oggpacket->bytes >= 7) && !strncmp(&oggpacket->packet[1], "vorbis", 6)) {
+ if ((oggpacket->bytes >= 7) && !strncmp((char *) &oggpacket->packet[1], "vorbis", 6)) {
info->streamType = GF_STREAM_AUDIO;
oggpack_readinit(&opb, oggpacket->packet, oggpacket->bytes);
oggpack_adv( &opb, 88);
info->type = OGG_VORBIS;
}
/*speex*/
- else if ((oggpacket->bytes >= 7) && !strncmp(&oggpacket->packet[0], "Speex", 5)) {
+ else if ((oggpacket->bytes >= 7) && !strncmp((char *) &oggpacket->packet[0], "Speex", 5)) {
info->streamType = GF_STREAM_AUDIO;
oggpack_readinit(&opb, oggpacket->packet, oggpacket->bytes);
oggpack_adv(&opb, 224);
info->num_init_headers = 1;
}
/*flac*/
- else if ((oggpacket->bytes >= 4) && !strncmp(&oggpacket->packet[0], "fLaC", 4)) {
+ else if ((oggpacket->bytes >= 4) && !strncmp((char *) &oggpacket->packet[0], "fLaC", 4)) {
info->streamType = GF_STREAM_AUDIO;
info->type = 3;
info->num_init_headers = OGG_FLAC;
}
/*theora*/
- else if ((oggpacket->bytes >= 7) && !strncmp(&oggpacket->packet[1], "theora", 6)) {
+ else if ((oggpacket->bytes >= 7) && !strncmp((char *) &oggpacket->packet[1], "theora", 6)) {
GF_BitStream *bs;
u32 fps_numerator, fps_denominator, keyframe_freq_force;
info->streamType = GF_STREAM_VISUAL;
info->type = OGG_THEORA;
- bs = gf_bs_new(oggpacket->packet, oggpacket->bytes, GF_BITSTREAM_READ);
+ bs = gf_bs_new((char *) oggpacket->packet, oggpacket->bytes, GF_BITSTREAM_READ);
gf_bs_read_int(bs, 56);
gf_bs_read_int(bs, 8); /* major version num */
gf_bs_read_int(bs, 8); /* minor version num */
slh.randomAccessPointFlag = 1;
slh.compositionTimeStampFlag = 1;
slh.compositionTimeStamp = st->ogg_ts;
- gf_term_on_sl_packet(read->service, st->ch, oggpacket->packet, oggpacket->bytes, &slh, GF_OK);
- st->ogg_ts += gf_vorbis_check_frame(&st->vp, oggpacket->packet, oggpacket->bytes);
+ gf_term_on_sl_packet(read->service, st->ch, (char *) oggpacket->packet, oggpacket->bytes, &slh, GF_OK);
+ st->ogg_ts += gf_vorbis_check_frame(&st->vp, (char *) oggpacket->packet, oggpacket->bytes);
}
else if (st->info.type==OGG_THEORA) {
oggpack_buffer opb;
slh.randomAccessPointFlag = oggpackB_read(&opb, 1) ? 0 : 1;
slh.compositionTimeStampFlag = 1;
slh.compositionTimeStamp = st->ogg_ts;
- gf_term_on_sl_packet(read->service, st->ch, oggpacket->packet, oggpacket->bytes, &slh, GF_OK);
+ gf_term_on_sl_packet(read->service, st->ch, (char *) oggpacket->packet, oggpacket->bytes, &slh, GF_OK);
st->ogg_ts += 1000;
}
}
while (ogg_stream_packetout(&st->os, &oggpacket ) > 0 ) {
GF_BitStream *bs;
if (st->info.type==OGG_VORBIS)
- gf_vorbis_parse_header(&st->vp, oggpacket.packet, oggpacket.bytes);
+ gf_vorbis_parse_header(&st->vp, (char *) oggpacket.packet, oggpacket.bytes);
bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
if (st->dsi) {
st->dsi_len=0;
}
gf_bs_write_u16(bs, oggpacket.bytes);
- gf_bs_write_data(bs, oggpacket.packet, oggpacket.bytes);
+ gf_bs_write_data(bs, (char *) oggpacket.packet, oggpacket.bytes);
gf_bs_get_content(bs, (char **)&st->dsi, &st->dsi_len);
gf_bs_del(bs);
st->parse_headers--;
static GF_Err THEO_ProcessData(GF_MediaDecoder *ifcg,
char *inBuffer, u32 inBufferLength,
- u16 ES_ID,
+ u16 ES_ID, u32 *CTS,
char *outBuffer, u32 *outBufferLength,
u8 PaddingBits, u32 mmlevel)
{
static GF_Err VORB_ProcessData(GF_MediaDecoder *ifcg,
char *inBuffer, u32 inBufferLength,
- u16 ES_ID,
+ u16 ES_ID, u32 *CTS,
char *outBuffer, u32 *outBufferLength,
u8 PaddingBits, u32 mmlevel)
{
#include <gpac/internal/media_dev.h>
#include <openHevcWrapper.h>
-//#define OPEN_SHVC
+#define OPEN_SHVC
#if defined(WIN32) && !defined(_WIN32_WCE) && !defined(__GNUC__)
# pragma comment(lib, "libLibOpenHevcWrapper")
+
+#if !defined _WIN64
+void libOpenHevcSetViewLayers(OpenHevc_Handle openHevcHandle, int val)
+{
+}
+#endif
+
#endif
typedef struct
u16 ES_ID;
u32 width, stride, height, out_size, pixel_ar, layer, nb_threads, luma_bpp, chroma_bpp;
+ Bool output_as_8bit;
Bool is_init;
Bool had_pic;
GF_ESD *esd;
OpenHevc_Handle openHevcHandle;
-#ifdef OPEN_SHVC
u32 nb_layers;
- Bool base_only;
-#endif
+ u32 output_cb_size;
+
+ u32 display_bpp;
+ Bool conv_to_8bit;
+ char *conv_buffer;
+
} HEVCDec;
+static GF_Err HEVC_ConfigurationScalableStream(HEVCDec *ctx, GF_ESD *esd)
+{
+ GF_HEVCConfig *cfg = NULL;
+ char *data;
+ u32 data_len;
+ GF_BitStream *bs;
+ u32 i, j;
+ if (!esd->decoderConfig->decoderSpecificInfo || !esd->decoderConfig->decoderSpecificInfo->data)
+ return GF_OK;
+ cfg = gf_odf_hevc_cfg_read(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, 0);
+ if (!cfg) return GF_NON_COMPLIANT_BITSTREAM;
+ if (ctx->nalu_size_length != cfg->nal_unit_size)
+ return GF_NON_COMPLIANT_BITSTREAM;
+
+ bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
+ for (i=0; i< gf_list_count(cfg->param_array); i++) {
+ GF_HEVCParamArray *ar = (GF_HEVCParamArray *)gf_list_get(cfg->param_array, i);
+ for (j=0; j< gf_list_count(ar->nalus); j++) {
+ GF_AVCConfigSlot *sl = (GF_AVCConfigSlot *)gf_list_get(ar->nalus, j);
+ gf_bs_write_int(bs, sl->size, 8*ctx->nalu_size_length);
+ gf_bs_write_data(bs, sl->data, sl->size);
+ }
+ }
+
+ gf_bs_get_content(bs, &data, &data_len);
+ gf_bs_del(bs);
+ libOpenHevcDecode(ctx->openHevcHandle, (u8 *)data, data_len, 0);
+ gf_free(data);
+
+ libOpenHevcSetActiveDecoders(ctx->openHevcHandle, 2);
+ libOpenHevcSetViewLayers(ctx->openHevcHandle, 1);
+ return GF_OK;
+}
static GF_Err HEVC_ConfigureStream(HEVCDec *ctx, GF_ESD *esd)
{
- u32 i, j;
+ u32 i, j;
GF_HEVCConfig *cfg = NULL;
ctx->ES_ID = esd->ESID;
ctx->width = ctx->height = ctx->out_size = ctx->luma_bpp = ctx->chroma_bpp = 0;
-#ifdef OPEN_SHVC
ctx->nb_layers = 1;
- ctx->base_only = GF_FALSE;
-#endif
-
if (esd->decoderConfig->decoderSpecificInfo && esd->decoderConfig->decoderSpecificInfo->data) {
- cfg = gf_odf_hevc_cfg_read(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, 0);
+ HEVCState hevc;
+ memset(&hevc, 0, sizeof(HEVCState));
+
+ cfg = gf_odf_hevc_cfg_read(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, 0);
if (!cfg) return GF_NON_COMPLIANT_BITSTREAM;
- ctx->nalu_size_length = cfg->nal_unit_size;
-
+ ctx->nalu_size_length = cfg->nal_unit_size;
+
for (i=0; i< gf_list_count(cfg->param_array); i++) {
- GF_HEVCParamArray *ar = gf_list_get(cfg->param_array, i);
- if (ar->type==GF_HEVC_NALU_SEQ_PARAM) {
- for (j=0; j< gf_list_count(ar->nalus); j++) {
- GF_AVCConfigSlot *sl = gf_list_get(ar->nalus, j);
- HEVCState hevc;
- s32 idx;
- u16 hdr = sl->data[0] << 8 | sl->data[1];
+ GF_HEVCParamArray *ar = (GF_HEVCParamArray *)gf_list_get(cfg->param_array, i);
+ for (j=0; j< gf_list_count(ar->nalus); j++) {
+ GF_AVCConfigSlot *sl = (GF_AVCConfigSlot *)gf_list_get(ar->nalus, j);
+ s32 idx;
+ u16 hdr = sl->data[0] << 8 | sl->data[1];
+ if (ar->type==GF_HEVC_NALU_SEQ_PARAM) {
idx = gf_media_hevc_read_sps(sl->data, sl->size, &hevc);
ctx->width = MAX(hevc.sps[idx].width, ctx->width);
ctx->height = MAX(hevc.sps[idx].height, ctx->height);
ctx->chroma_bpp = MAX(hevc.sps[idx].bit_depth_chroma, ctx->chroma_bpp);
if (hdr & 0x1f8) {
-#ifdef OPEN_SHVC
- ctx->nb_layers ++;
-#endif
- }
- }
- }
- }
+ ctx->nb_layers ++;
+ }
+ }
+ else if (ar->type==GF_HEVC_NALU_VID_PARAM) {
+ gf_media_hevc_read_vps(sl->data, sl->size, &hevc);
+ }
+ else if (ar->type==GF_HEVC_NALU_PIC_PARAM) {
+ gf_media_hevc_read_pps(sl->data, sl->size, &hevc);
+ }
+ }
+ }
gf_odf_hevc_cfg_del(cfg);
- } else {
+ } else {
ctx->nalu_size_length = 0;
}
-#ifdef OPEN_SHVC
- ctx->openHevcHandle = libOpenHevcInit(ctx->nb_threads, ctx->nb_layers, 0);
-#else
ctx->openHevcHandle = libOpenHevcInit(ctx->nb_threads, ctx->threading_type);
-#endif
-
- if (esd->decoderConfig && esd->decoderConfig->decoderSpecificInfo && esd->decoderConfig->decoderSpecificInfo->data) {
- libOpenHevcCopyExtraData(ctx->openHevcHandle, esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength+8);
- }
#ifndef GPAC_DISABLE_LOG
if (gf_log_tool_level_on(GF_LOG_CODEC, GF_LOG_DEBUG) ) {
libOpenHevcSetDebugMode(ctx->openHevcHandle, 1);
}
#endif
+
+
+ if (esd->decoderConfig && esd->decoderConfig->decoderSpecificInfo && esd->decoderConfig->decoderSpecificInfo->data) {
+ libOpenHevcSetActiveDecoders(ctx->openHevcHandle, ctx->nb_layers);
+ libOpenHevcSetViewLayers(ctx->openHevcHandle, ctx->nb_layers-1);
+
+ libOpenHevcCopyExtraData(ctx->openHevcHandle, (u8 *) esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength);
+ } else {
+ //hardcoded values: 2 layers max, display layer 0
+ libOpenHevcSetActiveDecoders(ctx->openHevcHandle, 1/*ctx->nb_layers*/);
+ libOpenHevcSetViewLayers(ctx->openHevcHandle, 0/*ctx->nb_layers-1*/);
+ }
+
libOpenHevcStartDecoder(ctx->openHevcHandle);
ctx->stride = ((ctx->luma_bpp==8) && (ctx->chroma_bpp==8)) ? ctx->width : ctx->width * 2;
ctx->out_size = ctx->stride * ctx->height * 3 / 2;
+
+ if (ctx->output_as_8bit && (ctx->stride>ctx->width)) {
+ ctx->stride /=2;
+ ctx->out_size /= 2;
+ ctx->chroma_bpp = ctx->luma_bpp = 8;
+ ctx->conv_to_8bit = 1;
+ }
return GF_OK;
}
sOpt = gf_modules_get_option((GF_BaseInterface *)ifcg, "OpenHEVC", "NumThreads");
if (!sOpt) {
char szO[100];
- //checkme I have perf using too many threads
- if (nb_threads > 6)
- nb_threads = 6;
sprintf(szO, "%d", nb_threads);
gf_modules_set_option((GF_BaseInterface *)ifcg, "OpenHEVC", "NumThreads", szO);
ctx->nb_threads = nb_threads;
sOpt = gf_modules_get_option((GF_BaseInterface *)ifcg, "OpenHEVC", "ThreadingType");
if (sOpt && !strcmp(sOpt, "wpp")) ctx->threading_type = 2;
- else if (sOpt && !strcmp(sOpt, "frame+wpp")) ctx->threading_type = 3;
+ else if (sOpt && !strcmp(sOpt, "frame+wpp")) ctx->threading_type = 4;
else {
ctx->threading_type = 1;
- if (!sOpt) gf_modules_set_option((GF_BaseInterface *)ifcg, "OpenHEVC", "ThreadingType", "frame+wpp");
+ if (!sOpt) gf_modules_set_option((GF_BaseInterface *)ifcg, "OpenHEVC", "ThreadingType", "frame");
}
+ sOpt = gf_modules_get_option((GF_BaseInterface *)ifcg, "Systems", "Output8bit");
+ if (!sOpt) gf_modules_set_option((GF_BaseInterface *)ifcg, "Systems", "Output8bit", (ctx->display_bpp>8) ? "no" : "yes");
+ if (sOpt && !strcmp(sOpt, "yes")) ctx->output_as_8bit = 1;
+
+ sOpt = gf_modules_get_option((GF_BaseInterface *)ifcg, "OpenHEVC", "CBUnits");
+ if (!sOpt) gf_modules_set_option((GF_BaseInterface *)ifcg, "OpenHEVC", "CBUnits", "4");
+ if (sOpt) ctx->output_cb_size = atoi(sOpt);
+ if (!ctx->output_cb_size) ctx->output_cb_size = 4;
- /*once base layer is configured, nothing to do on enhancement*/
- if (esd->dependsOnESID) return GF_OK;
+
+
+ /*RTP case: configure enhancement now*/
+ if (esd->dependsOnESID) {
+ HEVC_ConfigurationScalableStream(ctx, esd);
+ return GF_OK;
+ }
ctx->esd = esd;
return HEVC_ConfigureStream(ctx, esd);
ctx->is_init = GF_FALSE;
}
ctx->width = ctx->height = ctx->out_size = 0;
+ if (ctx->conv_buffer) gf_free(ctx->conv_buffer);
+ ctx->conv_buffer = NULL;
return GF_OK;
}
switch (capability->CapCode) {
case GF_CODEC_RESILIENT:
- capability->cap.valueInt = 1;
+ capability->cap.valueInt = 2;
break;
case GF_CODEC_WIDTH:
capability->cap.valueInt = ctx->width;
break;
case GF_CODEC_STRIDE:
capability->cap.valueInt = ctx->stride;
+ if (ctx->direct_output && !ctx->conv_buffer) {
+ //to fix soon - currently hardcoded to 32 pixels
+ if ((ctx->luma_bpp==8) && (ctx->chroma_bpp==8))
+ capability->cap.valueInt += 32;
+ else
+ capability->cap.valueInt += 64;
+ }
break;
case GF_CODEC_PAR:
capability->cap.valueInt = ctx->pixel_ar;
capability->cap.valueInt = 1;
break;
case GF_CODEC_BUFFER_MAX:
- capability->cap.valueInt = 4;
+ capability->cap.valueInt = ctx->output_cb_size;
+ break;
+ case GF_CODEC_WANTS_THREAD:
+ capability->cap.valueBool= GF_TRUE;
break;
case GF_CODEC_PADDING_BYTES:
capability->cap.valueInt = 32;
case GF_CODEC_REORDER:
capability->cap.valueInt = 1;
break;
+ case GF_CODEC_TRUSTED_CTS:
+ capability->cap.valueInt = 1;
+ break;
case GF_CODEC_DIRECT_OUTPUT:
capability->cap.valueBool = 1;
break;
{
HEVCDec *ctx = (HEVCDec*) ifcg->privateStack;
switch (capability.CapCode) {
+ case GF_CODEC_DISPLAY_BPP:
+ ctx->display_bpp = capability.cap.valueInt;
+ return GF_OK;
case GF_CODEC_WAIT_RAP:
if (ctx->openHevcHandle)
libOpenHevcFlush(ctx->openHevcHandle);
return GF_OK;
-#ifdef OPEN_SHVC
case GF_CODEC_MEDIA_SWITCH_QUALITY:
/*switch up*/
- if (capability.cap.valueInt) {
- ctx->base_only = GF_FALSE;
+ if (capability.cap.valueInt > 0) {
+ libOpenHevcSetViewLayers(ctx->openHevcHandle, 1);
} else {
- ctx->base_only = GF_TRUE;
+ libOpenHevcSetViewLayers(ctx->openHevcHandle, 0);
}
return GF_OK;
-#endif
case GF_CODEC_DIRECT_OUTPUT:
ctx->direct_output = GF_TRUE;
+ if (ctx->conv_to_8bit && ctx->out_size)
+ ctx->conv_buffer = gf_realloc(ctx->conv_buffer, sizeof(char)*ctx->out_size);
+
return GF_OK;
}
/*return unsupported to avoid confusion by the player (like color space changing ...) */
}
-static GF_Err HEVC_flush_picture(HEVCDec *ctx, char *outBuffer, u32 *outBufferLength )
+static GF_Err HEVC_flush_picture(HEVCDec *ctx, char *outBuffer, u32 *outBufferLength, u32 *CTS)
{
unsigned int a_w, a_h, a_stride, bit_depth;
OpenHevc_Frame_cpy openHevcFrame;
- u8 *pY, *pU, *pV;
libOpenHevcGetPictureInfo(ctx->openHevcHandle, &openHevcFrame.frameInfo);
a_h = openHevcFrame.frameInfo.nHeight;
a_stride = openHevcFrame.frameInfo.nYPitch;
bit_depth = openHevcFrame.frameInfo.nBitDepth;
- if ((ctx->luma_bpp>8) || (ctx->chroma_bpp>8)) a_stride *= 2;
+
+ *CTS = (u32) openHevcFrame.frameInfo.nTimeStamp;
+
+ if (!ctx->output_as_8bit) {
+ if ((ctx->luma_bpp>8) || (ctx->chroma_bpp>8)) a_stride *= 2;
+ } else {
+ if (bit_depth>8) {
+ bit_depth=8;
+
+ ctx->conv_to_8bit = 1;
+ }
+ }
if ((ctx->width != a_w) || (ctx->height!=a_h) || (ctx->stride != a_stride) || (ctx->luma_bpp!= bit_depth) || (ctx->chroma_bpp != bit_depth) ){
ctx->width = a_w;
ctx->luma_bpp = ctx->chroma_bpp = bit_depth;
/*always force layer resize*/
*outBufferLength = ctx->out_size;
+
+ if (ctx->conv_to_8bit && ctx->direct_output) {
+ ctx->conv_buffer = gf_realloc(ctx->conv_buffer, sizeof(char)*ctx->out_size);
+ }
return GF_BUFFER_TOO_SMALL;
}
- if (ctx->direct_output) {
- OpenHevc_Frame HVCFrame;
- libOpenHevcGetOutput(ctx->openHevcHandle, 1, &HVCFrame);
+ if (!ctx->conv_to_8bit && ctx->direct_output) {
*outBufferLength = ctx->out_size;
ctx->has_pic = GF_TRUE;
} else {
-
- pY = outBuffer;
- pU = outBuffer + ctx->stride * ctx->height;
- pV = outBuffer + 5*ctx->stride * ctx->height/4;
- openHevcFrame.pvY = (void*) pY;
- openHevcFrame.pvU = (void*) pU;
- openHevcFrame.pvV = (void*) pV;
- *outBufferLength = 0;
- if (libOpenHevcGetOutputCpy(ctx->openHevcHandle, 1, &openHevcFrame)) {
- *outBufferLength = ctx->out_size;
+ if (ctx->conv_to_8bit) {
+ OpenHevc_Frame openHevcFramePtr;
+ if (libOpenHevcGetOutput(ctx->openHevcHandle, 1, &openHevcFramePtr)) {
+ GF_VideoSurface dst;
+ memset(&dst, 0, sizeof(GF_VideoSurface));
+ dst.width = ctx->width;
+ dst.height = ctx->height;
+ dst.pitch_y = ctx->width;
+ dst.video_buffer = ctx->direct_output ? ctx->conv_buffer : outBuffer;
+ dst.pixel_format = GF_PIXEL_YV12;
+
+ gf_color_write_yv12_10_to_yuv(&dst, (u8 *) openHevcFramePtr.pvY, (u8 *) openHevcFramePtr.pvU, (u8 *) openHevcFramePtr.pvV, (openHevcFramePtr.frameInfo.nYPitch + 32)*2, ctx->width, ctx->height, NULL);
+ *outBufferLength = ctx->out_size;
+
+ if (ctx->direct_output )
+ ctx->has_pic = GF_TRUE;
+ }
+ } else {
+ openHevcFrame.pvY = (void*) outBuffer;
+ openHevcFrame.pvU = (void*) (outBuffer + ctx->stride * ctx->height);
+ openHevcFrame.pvV = (void*) (outBuffer + 5*ctx->stride * ctx->height/4);
+ *outBufferLength = 0;
+ if (libOpenHevcGetOutputCpy(ctx->openHevcHandle, 1, &openHevcFrame)) {
+ *outBufferLength = ctx->out_size;
+ }
}
}
return GF_OK;
static GF_Err HEVC_ProcessData(GF_MediaDecoder *ifcg,
char *inBuffer, u32 inBufferLength,
- u16 ES_ID,
+ u16 ES_ID, u32 *CTS,
char *outBuffer, u32 *outBufferLength,
u8 PaddingBits, u32 mmlevel)
{
if (!inBuffer) {
if ( libOpenHevcDecode(ctx->openHevcHandle, NULL, 0, 0) ) {
- return HEVC_flush_picture(ctx, outBuffer, outBufferLength);
+ return HEVC_flush_picture(ctx, outBuffer, outBufferLength, CTS);
}
return GF_OK;
}
-
- GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("HEVC: Decoding AU %d bytes\n", inBufferLength));
-
- if (!ES_ID || (ES_ID!=ctx->ES_ID) ) {
+ if (!ES_ID) {
*outBufferLength = 0;
return GF_OK;
}
+
if (*outBufferLength < ctx->out_size) {
*outBufferLength = ctx->out_size;
return GF_BUFFER_TOO_SMALL;
if (ctx->had_pic) {
ctx->had_pic = 0;
- return HEVC_flush_picture(ctx, outBuffer, outBufferLength);
+ return HEVC_flush_picture(ctx, outBuffer, outBufferLength, CTS);
}
-
- got_pic = libOpenHevcDecode(ctx->openHevcHandle, inBuffer, inBufferLength, 0);
+ got_pic = libOpenHevcDecode(ctx->openHevcHandle, (u8 *) inBuffer, inBufferLength, *CTS);
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[HEVC Decoder] Decode CTS %d - size %d - got pic %d\n", *CTS, inBufferLength, got_pic));
if (got_pic>0) {
- e = HEVC_flush_picture(ctx, outBuffer, outBufferLength);
+ e = HEVC_flush_picture(ctx, outBuffer, outBufferLength, CTS);
if (e) return e;
got_pic = 0;
}
+
return GF_OK;
}
if (!ctx->has_pic) return GF_BAD_PARAM;
ctx->has_pic = GF_FALSE;
+ if (ctx->conv_buffer) {
+ *pY_or_RGB = (u8 *) ctx->conv_buffer;
+ *pU = (u8 *) ctx->conv_buffer + ctx->stride * ctx->height;
+ *pV = (u8 *) ctx->conv_buffer + 5*ctx->stride * ctx->height/4;
+ return GF_OK;
+ }
+
res = libOpenHevcGetOutput(ctx->openHevcHandle, 1, &openHevcFrame);
if ((res<=0) || !openHevcFrame.pvY || !openHevcFrame.pvU || !openHevcFrame.pvV)
return GF_SERVICE_ERROR;
/*decode all NALUs*/
count = gf_list_count(cfg->sequenceParameterSets);
- SetCommandLayer(Layer, 255, 0, &i, 0);//bufindex can be reset without pb
+ SetCommandLayer(Layer, 255, 0, &res, 0);//bufindex can be reset without pb
for (i=0; i<count; i++) {
- u32 w=0, h=0, par_n=0, par_d=0;
+ u32 w=0, h=0, sid;
+ s32 par_n=0, par_d=0;
GF_AVCConfigSlot *slc = gf_list_get(cfg->sequenceParameterSets, i);
#ifndef GPAC_DISABLE_AV_PARSERS
- gf_avc_get_sps_info(slc->data, slc->size, &slc->id, &w, &h, &par_n, &par_d);
+ gf_avc_get_sps_info(slc->data, slc->size, &sid, &w, &h, &par_n, &par_d);
#endif
/*by default use the base layer*/
if (!i) {
ctx->pixel_ar = (par_n<<16) || par_d;
}
}
- res = decodeNAL(ctx->codec, slc->data, slc->size, &Picture, Layer);
+ res = decodeNAL(ctx->codec, (unsigned char *) slc->data, slc->size, &Picture, Layer);
if (res<0) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CODEC, ("[SVC Decoder] Error decoding SPS %d\n", res));
}
u32 sps_id, pps_id;
GF_AVCConfigSlot *slc = gf_list_get(cfg->pictureParameterSets, i);
gf_avc_get_pps_info(slc->data, slc->size, &pps_id, &sps_id);
- res = decodeNAL(ctx->codec, slc->data, slc->size, &Picture, Layer);
+ res = decodeNAL(ctx->codec, (unsigned char *) slc->data, slc->size, &Picture, Layer);
if (res<0) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CODEC, ("[SVC Decoder] Error decoding PPS %d\n", res));
}
static GF_Err OSVC_ProcessData(GF_MediaDecoder *ifcg,
char *inBuffer, u32 inBufferLength,
- u16 ES_ID,
+ u16 ES_ID, u32 *CTS,
char *outBuffer, u32 *outBufferLength,
u8 PaddingBits, u32 mmlevel)
{
return GF_BUFFER_TOO_SMALL;
}
- ctx->MaxDqId = GetDqIdMax(inBuffer, inBufferLength, ctx->nalu_size_length, ctx->DqIdTable, ctx->nalu_size_length ? 1 : 0);
+ ctx->MaxDqId = GetDqIdMax((unsigned char *) inBuffer, inBufferLength, ctx->nalu_size_length, ctx->DqIdTable, ctx->nalu_size_length ? 1 : 0);
if (!ctx->init_layer_set) {
//AVC stream in a h264 file
if (ctx->MaxDqId == -1)
got_pic = 0;
nalu_size = 0;
- ptr = inBuffer;
+ ptr = (u8 *) inBuffer;
if (!ctx->nalu_size_length) {
u32 size;
sc_size = 0;
- size = gf_media_nalu_next_start_code(inBuffer, inBufferLength, &sc_size);
+ size = gf_media_nalu_next_start_code((u8 *) inBuffer, inBufferLength, &sc_size);
if (sc_size) {
ptr += size+sc_size;
assert(inBufferLength >= size+sc_size);
* Copyright (c) Telecom ParisTech 2011-2012
* All rights reserved
*
- * This file is part of GPAC / User Event Recorder sub-project
+ * This file is part of GPAC / Sample On-Screen Display sub-project
*
* GPAC is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
Bool session_migration;
- Bool is_svc;
+ Bool is_scalable;
u32 cur_mid;
} RTPClient;
else if (!strcmp(att->Name, "x-session-name")) url = att->Value;
else if (!strcmp(att->Name, "x-session-id")) session_id = att->Value;
/*we have the H264-SVC streams*/
- else if (!strcmp(att->Name, "group") && !strncmp(att->Value, "DDP", 3)) rtp->is_svc = 1;
+ else if (!strcmp(att->Name, "group") && !strncmp(att->Value, "DDP", 3)) rtp->is_scalable = 1;
}
if (range) {
Start = range->start;
gf_list_add(od->ESDescriptors, esd);
// for each channel depending on this channel, get esd, set esd->dependsOnESID and add to od
- if (ch->owner->is_svc)
+ if (ch->owner->is_scalable)
{
u32 i, count;
RVCDec *ctx = (RVCDec*) ifcg->privateStack;
char* VTLFolder;
char *XDF_doc = NULL;
- int isAVCFile;
+ int isNALUFile;
/*not supported in this version*/
if (esd->dependsOnESID) return GF_NOT_SUPPORTED;
VTLFolder = (char *)gf_modules_get_option((GF_BaseInterface *)ifcg, "RVCDecoder", "VTLPath");
if (!VTLFolder) {
- GF_LOG(GF_LOG_ERROR, GF_LOG_CODEC, ("[RVC_Dec] Cannot locate VTL: path is unknown. Please indicate path in GPAC config file:\n[RVCDecoder]\nVTLPath=PATH\n"));
+ GF_LOG(GF_LOG_ERROR, GF_LOG_CODEC, ("[RVC Decoder] Cannot locate VTL: path is unknown. Please indicate path in GPAC config file:\n[RVCDecoder]\nVTLPath=PATH\n"));
return GF_SERVICE_ERROR;
} else {
- GF_LOG(GF_LOG_INFO, GF_LOG_CODEC, ("[RVC_Dec] Using VTL in %s\n", VTLFolder));
+ GF_LOG(GF_LOG_INFO, GF_LOG_CODEC, ("[RVC Decoder] Using VTL in %s\n", VTLFolder));
}
/*initialize RVC*/
}
- if(esd->decoderConfig->objectTypeIndication==GPAC_OTI_VIDEO_AVC) isAVCFile = 1;
- else isAVCFile = 0;
+ switch (esd->decoderConfig->objectTypeIndication) {
+ case GPAC_OTI_VIDEO_AVC:
+ case GPAC_OTI_VIDEO_SVC:
+ case GPAC_OTI_VIDEO_HEVC:
+ case GPAC_OTI_VIDEO_SHVC:
+ isNALUFile = 1;
+ break;
+ default:
+ isNALUFile = 0;
+ break;
+ }
- rvc_init(XDF_doc, VTLFolder, isAVCFile); //->data contains the uncompressed XDF
+ rvc_init(XDF_doc, VTLFolder, isNALUFile); //->data contains the uncompressed XDF
/*free data*/
gf_free(XDF_doc);
return GF_OK;
/*initialize the decoder */
- if (esd->decoderConfig->objectTypeIndication==GPAC_OTI_VIDEO_AVC) {
+ if ( (esd->decoderConfig->objectTypeIndication==GPAC_OTI_VIDEO_AVC) || (esd->decoderConfig->objectTypeIndication==GPAC_OTI_VIDEO_SVC)) {
GF_AVCConfig *cfg = gf_odf_avc_cfg_read(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength);
if (!cfg) return GF_NON_COMPLIANT_BITSTREAM;
ctx->nalu_size_length = cfg->nal_unit_size;
ctx->pixel_ar = (par_n<<16) || par_d;
}
}
- /* call decode - warning for AVC: the data blocks do not contain startcode prefixes (00000001), you may need to add them) */
+ /* call decode*/
res = rvc_decode(slc->data, slc->size, &Picture, 1);
if (res<0) {
- GF_LOG(GF_LOG_ERROR, GF_LOG_CODEC, ("[SVC Decoder] Error decoding SPS %d\n", res));
+ GF_LOG(GF_LOG_ERROR, GF_LOG_CODEC, ("[RVC Decoder] Error decoding SPS %d\n", res));
}
-
}
count = gf_list_count(cfg->pictureParameterSets);
for (i=0; i<count; i++) {
GF_AVCConfigSlot *slc = gf_list_get(cfg->pictureParameterSets, i);
- /*same remark as above*/
-
-
res = rvc_decode(slc->data, slc->size, &Picture, 1);
if (res<0) {
- GF_LOG(GF_LOG_ERROR, GF_LOG_CODEC, ("[SVC Decoder] Error decoding PPS %d\n", res));
+ GF_LOG(GF_LOG_ERROR, GF_LOG_CODEC, ("[RVC Decoder] Error decoding PPS %d\n", res));
}
}
gf_odf_avc_cfg_del(cfg);
+ /*initialize the decoder */
+ } else if ( (esd->decoderConfig->objectTypeIndication==GPAC_OTI_VIDEO_HEVC) || (esd->decoderConfig->objectTypeIndication==GPAC_OTI_VIDEO_SHVC)) {
+ GF_HEVCConfig *cfg = gf_odf_hevc_cfg_read(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, 0);
+ if (!cfg) return GF_NON_COMPLIANT_BITSTREAM;
+ ctx->nalu_size_length = cfg->nal_unit_size;
+
+ /*decode all NALUs*/
+ count = gf_list_count(cfg->param_array);
+ for (i=0; i<count; i++) {
+ u32 j, count2;
+ GF_HEVCParamArray *ar = gf_list_get(cfg->param_array, i);
+ count2 = gf_list_count(ar->nalus);
+ for (j=0; j<count2; j++) {
+ u32 w, h, par_n, par_d;
+ GF_AVCConfigSlot *slc = gf_list_get(ar->nalus, j);
+
+ if (ar->type==GF_HEVC_NALU_SEQ_PARAM) {
+ gf_hevc_get_sps_info(slc->data, slc->size, &slc->id, &w, &h, &par_n, &par_d);
+ /*by default use the base layer*/
+ if (!j) {
+ if ((ctx->width<w) || (ctx->height<h)) {
+ ctx->width = w;
+ ctx->height = h;
+ if ( ((s32)par_n>0) && ((s32)par_d>0) )
+ ctx->pixel_ar = (par_n<<16) || par_d;
+ }
+ }
+ }
+
+ /* call decode*/
+ res = rvc_decode(slc->data, slc->size, &Picture, 1);
+ if (res<0) {
+ GF_LOG(GF_LOG_ERROR, GF_LOG_CODEC, ("[RVC Decoder] Error decoding parameter set: %d\n", res));
+ }
+ }
+ }
+
+ gf_odf_hevc_cfg_del(cfg);
} else if (esd->decoderConfig->objectTypeIndication==GPAC_OTI_VIDEO_MPEG4_PART2) {
GF_M4VDecSpecInfo dsi;
GF_Err e;
res = rvc_decode(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, &Picture, 1);
if (res<0) {
- GF_LOG(GF_LOG_ERROR, GF_LOG_CODEC, ("[SVC Decoder] Error decoding PPS %d\n", res));
+ GF_LOG(GF_LOG_ERROR, GF_LOG_CODEC, ("[RVC Decoder] Error decoding PPS %d\n", res));
}
res = rvc_decode(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, &Picture, 1);
if (res<0) {
- GF_LOG(GF_LOG_ERROR, GF_LOG_CODEC, ("[SVC Decoder] Error decoding PPS %d\n", res));
+ GF_LOG(GF_LOG_ERROR, GF_LOG_CODEC, ("[RVC Decoder] Error decoding PPS %d\n", res));
}
}
static GF_Err RVCD_ProcessData(GF_MediaDecoder *ifcg,
char *inBuffer, u32 inBufferLength,
- u16 ES_ID,
+ u16 ES_ID, u32 *CTS,
char *outBuffer, u32 *outBufferLength,
u8 PaddingBits, u32 mmlevel)
{
if (!esd) return GF_CODEC_STREAM_TYPE_SUPPORTED;
switch (esd->decoderConfig->objectTypeIndication) {
case GPAC_OTI_VIDEO_AVC:
+ case GPAC_OTI_VIDEO_SVC:
case GPAC_OTI_VIDEO_MPEG4_PART2:
+ case GPAC_OTI_VIDEO_HEVC:
+ case GPAC_OTI_VIDEO_SHVC:
if (!esd->decoderConfig->rvc_config && !esd->decoderConfig->predefined_rvc_config) return GF_CODEC_NOT_SUPPORTED;
return GF_CODEC_SUPPORTED+1;
}
static const char *RVCD_GetCodecName(GF_BaseDecoder *dec)
{
- return "RVC Decoder";
+ return "Reconfigurable Video Decoder";
}
GF_BaseDecoder *NewRVCDec()
SDL_GetDesktopDisplayMode(0,&vinf);
dr->max_screen_width = vinf.w;
dr->max_screen_height = vinf.h;
+ dr->max_screen_bpp = 8;
#else
vinf = SDL_GetVideoInfo();
#if SDL_VERSION_ATLEAST(1, 2, 10)
dr->max_screen_width = vinf->current_w;
dr->max_screen_height = vinf->current_h;
+ dr->max_screen_bpp = 8;
#else
{
SDL_Rect** modes;
}
}
}
+ dr->max_screen_bpp = 8;
#endif /* versions prior to 1.2.10 do not have the size of screen */
#endif
{
SDLVid_DestroyObjects(ctx);
#if SDL_VERSION_ATLEAST(2,0,0)
- if ( ctx->gl_context )
+ if ( ctx->gl_context ) {
SDL_GL_DeleteContext(ctx->gl_context);
- if ( ctx->renderer )
+ ctx->gl_context = NULL;
+ }
+ if ( ctx->renderer ) {
SDL_DestroyRenderer(ctx->renderer);
- ctx->gl_context = NULL;
-
+ ctx->renderer = NULL;
+ }
+
/*iOS SDL2 has a nasty bug that breaks switching between 2D and GL context if we don't re-init the video subsystem*/
#ifdef GPAC_IPHONE
if ( ctx->screen ) {
dst.width = wndSurface->w;
dst.pitch_x = 0;
dst.pitch_y = wndSurface->pitch;
- dst.pixel_format = SDLVid_MapPixelFormat(wndSurface->format, SDL_FALSE);
+ dst.pixel_format = SDLVid_MapPixelFormat(wndSurface->format, GF_FALSE);
dst.video_buffer = (char*)wndSurface->pixels;
#else
SDL_LockSurface(ctx->screen);
/*avoids GCC warning*/
if (!obj) obj = NULL;
+#ifndef GPAC_CONFIG_DARWIN
if (!id) id=0;
- if (!vp) vp=0;
+#endif
+ if (!vp) vp=0;
return JS_TRUE;
}
wi->instance_id ++;
sprintf(szName, "%s#%s#Instance%d", path, wi->widget->name, wi->instance_id);
- sprintf(wi->secname, "Widget#%08X", gf_crc_32(szName, (u32) strlen(szName)));
+ sprintf((char *)wi->secname, "Widget#%08X", gf_crc_32(szName, (u32) strlen(szName)));
/*create section*/
gf_cfg_set_key(wm->term->user->config, "Widgets", (const char *) wi->secname, " ");
static Bool wm_enum_dir(void *cbk, char *file_name, char *file_path)
{
- return gf_enum_directory(file_path, 0, wm_enum_widget, cbk, "mgt");
+ return (gf_enum_directory(file_path, 0, wm_enum_widget, cbk, "mgt")==GF_OK) ? GF_FALSE : GF_TRUE;
}
u32 instID = ID ? atoi(ID) : 0;
GF_WidgetInstance *wi = wm_load_widget(wm, manifest, instID, 0);
if (wi) {
- strcpy(wi->secname, (const char *) name);
+ strcpy((char *)wi->secname, (const char *) name);
wm_widget_jsbind(wm, wi);
}
}
GF_Event evt;
XWindow *xWin = (XWindow *)vout->opaque;
- GF_LOG(GF_LOG_DEBUG, GF_LOG_MMIO, ("[X11] Setting up GL for display %d\n", xWin->display));
- XSync(xWin->display, False);
- xWin->glx_context = glXCreateContext(xWin->display,xWin->glx_visualinfo, NULL, True);
- XSync(xWin->display, False);
- if (!xWin->glx_context) return GF_IO_ERR;
- if (xWin->output_3d_mode==2) return GF_IO_ERR;
+ if (!xWin->glx_visualinfo) return GF_IO_ERR;
+ memset(&evt, 0, sizeof(GF_Event));
+ if (!xWin->glx_context) {
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_MMIO, ("[X11] Setting up GL for display %d\n", xWin->display));
+ XSync(xWin->display, False);
+ xWin->glx_context = glXCreateContext(xWin->display,xWin->glx_visualinfo, NULL, True);
+ XSync(xWin->display, False);
+ if (!xWin->glx_context) return GF_IO_ERR;
+ if (xWin->output_3d_mode==2) return GF_IO_ERR;
+
+ evt.setup.hw_reset = 1;
+ }
if ( ! glXMakeCurrent(xWin->display, xWin->fullscreen ? xWin->full_wnd : xWin->wnd, xWin->glx_context) ) return GF_IO_ERR;
XSync(xWin->display, False);
- memset(&evt, 0, sizeof(GF_Event));
+
evt.type = GF_EVENT_VIDEO_SETUP;
- evt.setup.hw_reset = 1;
vout->on_event (vout->evt_cbk_hdl,&evt);
xWin->is_init = 1;
return GF_OK;
X11VID ();
xWindow->fullscreen = bFullScreenOn;
#ifdef GPAC_HAS_OPENGL
- if (xWindow->output_3d_mode==1) X11_ReleaseGL(xWindow);
+// if (xWindow->output_3d_mode==1) X11_ReleaseGL(xWindow);
#endif
if (bFullScreenOn) {
xWindow->screennum=0;
vout->max_screen_width = DisplayWidth(xWindow->display, xWindow->screennum);
vout->max_screen_height = DisplayHeight(xWindow->display, xWindow->screennum);
+ vout->max_screen_bpp = 8;
+
/*
* Full screen wnd
*/
}
retry_8bpp:
i=0;
- attribs[i++] = GLX_DRAWABLE_TYPE;
- attribs[i++] = GLX_WINDOW_BIT;
if (nb_bits>8) {
+ attribs[i++] = GLX_DRAWABLE_TYPE;
+ attribs[i++] = GLX_WINDOW_BIT;
attribs[i++] = GLX_RENDER_TYPE;
attribs[i++] = GLX_RGBA_BIT;
} else {
typedef int (* FnGlXGetFBConfigAttrib) (Display * dpy, GLXFBConfig config, int attribute, int * value);
- FnGlXChooseFBConfigProc my_glXChooseFBConfig = (FnGlXChooseFBConfigProc) glXGetProcAddress("glXChooseFBConfig");
- FnGlXGetVisualFromFBConfigProc my_glXGetVisualFromFBConfig = (FnGlXGetVisualFromFBConfigProc)glXGetProcAddress("glXGetVisualFromFBConfig");
- FnGlXGetFBConfigAttrib my_glXGetFBConfigAttrib = (FnGlXGetFBConfigAttrib)glXGetProcAddress("glXGetFBConfigAttrib");
+ FnGlXChooseFBConfigProc my_glXChooseFBConfig = (FnGlXChooseFBConfigProc) glXGetProcAddress((GLubyte*) "glXChooseFBConfig");
+ FnGlXGetVisualFromFBConfigProc my_glXGetVisualFromFBConfig = (FnGlXGetVisualFromFBConfigProc)glXGetProcAddress((GLubyte*) "glXGetVisualFromFBConfig");
+ FnGlXGetFBConfigAttrib my_glXGetFBConfigAttrib = (FnGlXGetFBConfigAttrib)glXGetProcAddress((GLubyte*) "glXGetFBConfigAttrib");
if (my_glXChooseFBConfig && my_glXGetVisualFromFBConfig) {
fb = my_glXChooseFBConfig(xWindow->display, xWindow->screennum, attribs, &fbcount);
nb_bits = 8;
goto retry_8bpp;
}
- xWindow->glx_visualinfo = my_glXGetVisualFromFBConfig(xWindow->display, fb[0]);
+ xWindow->glx_visualinfo = my_glXGetVisualFromFBConfig(xWindow->display, fb[0]);
if (my_glXGetFBConfigAttrib && fb) {
int r, g, b;
} else {
xWindow->glx_visualinfo = glXChooseVisual(xWindow->display, xWindow->screennum, attribs);
}
+ vout->max_screen_bpp = nb_bits;
if (!xWindow->glx_visualinfo) {
GF_LOG(GF_LOG_ERROR, GF_LOG_MMIO, ("[X11] Error selecting GL display\n"));
}
static GF_Err XVID_ProcessData(GF_MediaDecoder *ifcg,
char *inBuffer, u32 inBufferLength,
- u16 ES_ID,
+ u16 ES_ID, u32 *CTS,
char *outBuffer, u32 *outBufferLength,
u8 PaddingBits, u32 mmlevel)
{
}
static GF_Err XVID_ProcessData(GF_MediaDecoder *ifcg,
char *inBuffer, u32 inBufferLength,
- u16 ES_ID,
+ u16 ES_ID, u32 *CTS,
char *outBuffer, u32 *outBufferLength,
u8 PaddingBits, u32 mmlevel)
{
ES_Descriptor {
ES_ID 3
muxInfo MuxInfo {
- fileName "../auxiliary_files/logo.jpg"
+ fileName "../auxiliary_files/logo.png"
}
}
]
u32 nbBits, val = 0;
if (sc_enc->emul) return;
if ((str[0]=='0') && (str[1]=='x' || str[1]=='X')) {
- val = strtoul(sc_enc->token, (char **) NULL, 16);
+ val = (u32) strtoul(sc_enc->token, (char **) NULL, 16);
} else if (str[0]=='0' && isdigit(str[1])) {
- val = strtoul(str, (char **) NULL, 8);
+ val = (u32) strtoul(str, (char **) NULL, 8);
} else if (isdigit(str[0])) {
- val = strtoul(str, (char **) NULL, 10);
+ val = (u32) strtoul(str, (char **) NULL, 10);
} else {
GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[bifs] Script encoding: %s is not an integer\n", str));
sc_enc->err = GF_BAD_PARAM;
{
u32 val = 0;
if ((str[0]=='0') && (str[1]=='x' || str[1]=='X')) {
- val = strtoul(sc_enc->token, (char **) NULL, 16);
+ val = (u32) strtoul(sc_enc->token, (char **) NULL, 16);
} else if (str[0]=='0' && isdigit(str[1])) {
- val = strtoul(str, (char **) NULL, 8);
+ val = (u32) strtoul(str, (char **) NULL, 8);
} else if (isdigit(str[0])) {
- val = strtoul(str, (char **) NULL, 10);
+ val = (u32) strtoul(str, (char **) NULL, 10);
} else {
GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[bifs] Script encoding: %s is not an integer\n", str));
sc_enc->err = GF_BAD_PARAM;
gf_mo_get_object_time(ai->stream, &obj_time);
obj_time += audio_delay_ms;
- drift = (s32)obj_time;
- drift -= (s32)ts;
-
+ if (ai->compositor->bench_mode) {
+ drift = 0;
+ } else {
+ drift = (s32)obj_time;
+ drift -= (s32)ts;
+ }
+
#ifdef ENABLE_EARLY_FRAME_DETECTION
/*too early (silence insertions), skip*/
if (drift < 0) {
GF_LOG(GF_LOG_INFO, GF_LOG_AUDIO, ("[Audio Input] audio too early of %d (CTS %u at OTB %u with audio delay %d ms)\n", drift + audio_delay_ms, ts, obj_time, audio_delay_ms));
ai->need_release = 0;
- gf_mo_release_data(ai->stream, 0, 0);
+ gf_mo_release_data(ai->stream, 0, -1);
*size = 0;
return NULL;
}
}
}
if (!ar->audio_out) {
+ GF_AudioOutput *raw_out = NULL;
count = gf_modules_get_count(ar->user->modules);
for (i=0; i<count; i++) {
ar->audio_out = (GF_AudioOutput *) gf_modules_load_interface(ar->user->modules, i, GF_AUDIO_OUTPUT_INTERFACE);
if (!ar->audio_out) continue;
+
+ //in enum mode, only use raw out if everything else failed ...
+ if (!stricmp(ar->audio_out->module_name, "Raw Audio Output")) {
+ raw_out = ar->audio_out;
+ ar->audio_out = NULL;
+ continue;
+ }
GF_LOG(GF_LOG_DEBUG, GF_LOG_AUDIO, ("[AudioRender] Audio output module %s loaded\n", ar->audio_out->module_name));
/*check that's a valid audio compositor*/
- if (ar->audio_out->SelfThreaded) {
- if (ar->audio_out->SetPriority) break;
- } else {
- if (ar->audio_out->WriteAudio) break;
+ if ((ar->audio_out->SelfThreaded && ar->audio_out->SetPriority) || ar->audio_out->WriteAudio) {
+ /*remember the module we use*/
+ gf_cfg_set_key(user->config, "Audio", "DriverName", ar->audio_out->module_name);
+ break;
}
gf_modules_close_interface((GF_BaseInterface *)ar->audio_out);
ar->audio_out = NULL;
}
+ if (raw_out) {
+ if (ar->audio_out) gf_modules_close_interface((GF_BaseInterface *)raw_out);
+ else ar->audio_out = raw_out;
+ }
}
/*if not init we run with a NULL audio compositor*/
if (ar->audio_out) {
-
ar->audio_out->FillBuffer = gf_ar_fill_output;
ar->audio_out->audio_renderer = ar;
GF_LOG(GF_LOG_DEBUG, GF_LOG_AUDIO, ("[AudioRender] Setting up audio module %s\n", ar->audio_out->module_name));
gf_modules_close_interface((GF_BaseInterface *)ar->audio_out);
ar->audio_out = NULL;
} else {
- /*remember the module we use*/
- gf_cfg_set_key(user->config, "Audio", "DriverName", ar->audio_out->module_name);
if (!ar->audio_out->SelfThreaded) {
ar->th = gf_th_new("AudioRenderer");
gf_th_run(ar->th, gf_ar_proc, ar);
GF_LOG(GF_LOG_INFO, GF_LOG_COMPOSE, ("[Compositor] Switching fullscreen %s\n", compositor->fullscreen ? "off" : "on"));
/*move to FS*/
compositor->fullscreen = !compositor->fullscreen;
+
+ gf_sc_ar_control(compositor->audio_renderer, 0);
+
if (compositor->fullscreen && (compositor->scene_width>=compositor->scene_height)
#ifndef GPAC_DISABLE_3D
&& !compositor->visual->type_3d
e = compositor->video_out->SetFullScreen(compositor->video_out, compositor->fullscreen, &compositor->display_width, &compositor->display_height);
}
+ gf_sc_ar_control(compositor->audio_renderer, 1);
+
if (e) {
GF_Event evt;
memset(&evt, 0, sizeof(GF_Event));
/*fullscreen on/off request*/
if (compositor->msg_type & GF_SR_CFG_FULLSCREEN) {
compositor->msg_type &= ~GF_SR_CFG_FULLSCREEN;
- gf_sc_set_fullscreen(compositor);
- gf_sc_next_frame_state(compositor, GF_SC_DRAW_FRAME);
- notif_size=1;
+ //video is about to resetup, wait for the setup
+ if (compositor->recompute_ar) {
+ compositor->fullscreen_postponed = 1;
+ } else {
+ gf_sc_set_fullscreen(compositor);
+ gf_sc_next_frame_state(compositor, GF_SC_DRAW_FRAME);
+ notif_size=1;
+ }
}
compositor->msg_type &= ~GF_SR_IN_RECONFIG;
}
}
}
-Bool gf_sc_draw_frame(GF_Compositor *compositor)
+GF_EXPORT
+Bool gf_sc_draw_frame(GF_Compositor *compositor, u32 *ms_till_next)
{
gf_sc_simulation_tick(compositor);
+ if (ms_till_next) {
+ if ((s32) compositor->next_frame_delay == -1)
+ *ms_till_next = compositor->frame_duration;
+ else
+ *ms_till_next = MIN(compositor->next_frame_delay, compositor->frame_duration);
+ }
if (compositor->frame_draw_type) return 1;
if (compositor->fonts_pending) return 1;
return GF_FALSE;
}
-
/*forces graphics redraw*/
GF_EXPORT
void gf_sc_reset_graphics(GF_Compositor *compositor)
gf_modules_close_interface((GF_BaseInterface *)compositor->video_out);
compositor->video_out = NULL;
}
- } else {
- GF_LOG(GF_LOG_WARNING, GF_LOG_CORE, ("Failed to load module %s, no video driver.\n", sOpt));
- sOpt = NULL;
}
}
if (!compositor->video_out) {
- u32 i, count;
- count = gf_modules_get_count(compositor->user->modules);
+ GF_VideoOutput *raw_out = NULL;
+ u32 i, count = gf_modules_get_count(compositor->user->modules);
GF_LOG(GF_LOG_INFO, GF_LOG_CORE, ("Trying to find a suitable video driver amongst %d modules...\n", count));
for (i=0; i<count; i++) {
compositor->video_out = (GF_VideoOutput *) gf_modules_load_interface(compositor->user->modules, i, GF_VIDEO_OUTPUT_INTERFACE);
if (!compositor->video_out) continue;
compositor->video_out->evt_cbk_hdl = compositor;
compositor->video_out->on_event = gf_sc_on_event;
+ //in enum mode, only use raw out if everything else failed ...
+ if (!stricmp(compositor->video_out->module_name, "Raw Video Output")) {
+ raw_out = compositor->video_out;
+ compositor->video_out = NULL;
+ continue;
+ }
+
/*init hw*/
if (compositor->video_out->Setup(compositor->video_out, compositor->user->os_window_handler, compositor->user->os_display, compositor->user->init_flags)==GF_OK) {
gf_cfg_set_key(compositor->user->config, "Video", "DriverName", compositor->video_out->module_name);
gf_modules_close_interface((GF_BaseInterface *)compositor->video_out);
compositor->video_out = NULL;
}
+ if (raw_out) {
+ if (compositor->video_out) gf_modules_close_interface((GF_BaseInterface *)raw_out);
+ else {
+ compositor->video_out = raw_out;
+ compositor->video_out ->Setup(compositor->video_out, compositor->user->os_window_handler, compositor->user->os_display, compositor->user->init_flags);
+ }
+ }
}
if (!compositor->video_out ) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CORE, ("Failed to create compositor->video_out, did not find any suitable driver."));
}
compositor->textures = gf_list_new();
+ compositor->textures_gc = gf_list_new();
compositor->frame_rate = 30.0;
compositor->frame_duration = 33;
compositor->time_nodes = gf_list_new();
-#ifdef GF_SR_EVENT_QUEUE
- compositor->events = gf_list_new();
- compositor->ev_mx = gf_mx_new("EventQueue");
-#endif
+ compositor->event_queue = gf_list_new();
+ compositor->event_queue_back = gf_list_new();
+ compositor->evq_mx = gf_mx_new("EventQueue");
#ifdef GF_SR_USE_VIDEO_CACHE
compositor->cached_groups = gf_list_new();
compositor->interaction_level = GF_INTERACT_NORMAL | GF_INTERACT_INPUT_SENSOR | GF_INTERACT_NAVIGATION;
compositor->scene_sampled_clock = 0;
+ compositor->video_th_id = gf_th_id();
return GF_OK;
}
compositor->video_th_state = GF_COMPOSITOR_THREAD_RUN;
while (compositor->video_th_state == GF_COMPOSITOR_THREAD_RUN) {
- if (compositor->is_hidden==1)
+ if (compositor->is_hidden==1) {
+ if (!compositor->bench_mode) {
+ compositor->scene_sampled_clock = gf_sc_ar_get_clock(compositor->audio_renderer);
+ }
gf_sleep(compositor->frame_duration);
- else
+ } else
gf_sc_simulation_tick(compositor);
}
#ifndef GPAC_DISABLE_3D
visual_3d_reset_graphics(compositor->visual);
+ compositor_2d_reset_gl_auto(compositor);
#endif
+ gf_sc_texture_cleanup_hw(compositor);
+
+
/*destroy video out here if we're using openGL, to avoid threading issues*/
compositor->video_out->Shutdown(compositor->video_out);
gf_modules_close_interface((GF_BaseInterface *)compositor->video_out);
}
}
gf_th_del(compositor->VisualThread);
+ } else {
+#ifndef GPAC_DISABLE_3D
+ compositor_2d_reset_gl_auto(compositor);
+#endif
+ gf_sc_texture_cleanup_hw(compositor);
}
+
if (compositor->video_out) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_COMPOSE, ("[Compositor] Closing video output\n"));
compositor->video_out->Shutdown(compositor->video_out);
}
#ifndef GPAC_DISABLE_3D
- compositor_2d_reset_gl_auto(compositor);
if (compositor->unit_bbox) mesh_free(compositor->unit_bbox);
#endif
}
gf_list_del(compositor->proto_modules);
}
-#ifdef GF_SR_EVENT_QUEUE
- gf_mx_p(compositor->ev_mx);
- while (gf_list_count(compositor->events)) {
- GF_Event *ev = (GF_Event *)gf_list_get(compositor->events, 0);
- gf_list_rem(compositor->events, 0);
- gf_free(ev);
+ gf_mx_p(compositor->evq_mx);
+ while (gf_list_count(compositor->event_queue)) {
+ GF_QueuedEvent *qev = (GF_QueuedEvent *)gf_list_get(compositor->event_queue, 0);
+ gf_list_rem(compositor->event_queue, 0);
+ gf_free(qev);
}
- gf_mx_v(compositor->ev_mx);
- gf_mx_del(compositor->ev_mx);
- gf_list_del(compositor->events);
-#endif
+ //drain the back queue as well - remove from event_queue_back (NOT event_queue), otherwise this loop never terminates
+ while (gf_list_count(compositor->event_queue_back)) {
+ GF_QueuedEvent *qev = (GF_QueuedEvent *)gf_list_get(compositor->event_queue_back, 0);
+ gf_list_rem(compositor->event_queue_back, 0);
+ gf_free(qev);
+ }
+ gf_mx_v(compositor->evq_mx);
+ gf_mx_del(compositor->evq_mx);
+ gf_list_del(compositor->event_queue);
+ gf_list_del(compositor->event_queue_back);
if (compositor->font_manager) gf_font_manager_del(compositor->font_manager);
#endif
if (compositor->textures) gf_list_del(compositor->textures);
+ if (compositor->textures_gc) gf_list_del(compositor->textures_gc);
if (compositor->time_nodes) gf_list_del(compositor->time_nodes);
if (compositor->extra_scenes) gf_list_del(compositor->extra_scenes);
if (compositor->video_listeners) gf_list_del(compositor->video_listeners);
compositor_2d_set_user_transform(compositor, compositor->zoom, compositor->trans_x, compositor->trans_y, 1);
}
-static void gf_sc_reset(GF_Compositor *compositor)
+static void gf_sc_reset(GF_Compositor *compositor, Bool has_scene)
{
Bool draw_mode;
#ifndef GPAC_DISABLE_3D
//force a recompute of the canvas
- if (compositor->hybgl_txh) {
+ if (has_scene && compositor->hybgl_txh) {
compositor->hybgl_txh->width = compositor->hybgl_txh->height = 0;
}
#endif
gf_sc_ar_reset(compositor->audio_renderer);
}
-#ifdef GF_SR_EVENT_QUEUE
GF_LOG(GF_LOG_DEBUG, GF_LOG_COMPOSE, ("[Compositor] Reseting event queue\n"));
- gf_mx_p(compositor->ev_mx);
- while (gf_list_count(compositor->events)) {
- GF_Event *ev = (GF_Event*)gf_list_get(compositor->events, 0);
- gf_list_rem(compositor->events, 0);
- gf_free(ev);
+ gf_mx_p(compositor->evq_mx);
+ while (gf_list_count(compositor->event_queue)) {
+ GF_QueuedEvent *qev = (GF_QueuedEvent*)gf_list_get(compositor->event_queue, 0);
+ gf_list_rem(compositor->event_queue, 0);
+ gf_free(qev);
}
-#endif
+ gf_mx_v(compositor->evq_mx);
GF_LOG(GF_LOG_DEBUG, GF_LOG_COMPOSE, ("[Compositor] Reseting compositor module\n"));
/*reset main surface*/
- gf_sc_reset(compositor);
+ gf_sc_reset(compositor, scene_graph ? 1 : 0);
/*set current graph*/
compositor->scene = scene_graph;
}
gf_sc_reset_framerate(compositor);
-#ifdef GF_SR_EVENT_QUEUE
- gf_mx_v(compositor->ev_mx);
-#endif
gf_sc_lock(compositor, 0);
if (scene_graph)
#ifndef GPAC_DISABLE_3D
- sOpt = gf_cfg_get_key(compositor->user->config, "Compositor", "ForceOpenGL");
- compositor->force_opengl_2d = (sOpt && !strcmp(sOpt, "yes")) ? 1 : 0;
+ sOpt = gf_cfg_get_key(compositor->user->config, "Compositor", "OpenGLMode");
+ compositor->force_opengl_2d = (sOpt && !strcmp(sOpt, "always")) ? 1 : 0;
if (!sOpt) {
- compositor->visual->type_3d = 1;
compositor->recompute_ar = 1;
compositor->autoconfig_opengl = 1;
- }
-
+ } else {
+ compositor->hybrid_opengl = !strcmp(sOpt, "hybrid") ? 1 : 0;
#ifdef OPENGL_RASTER
- compositor->opengl_raster = (sOpt && !strcmp(sOpt, "raster")) ? 1 : 0;
- if (compositor->opengl_raster) compositor->traverse_state->immediate_draw = GF_TRUE;
+ compositor->opengl_raster = !strcmp(sOpt, "raster") ? 1 : 0;
+ if (compositor->opengl_raster) compositor->traverse_state->immediate_draw = GF_TRUE;
#endif
+ }
- compositor->hybrid_opengl = (sOpt && !strcmp(sOpt, "hybrid")) ? 1 : 0;
+ sOpt = gf_cfg_get_key(compositor->user->config, "Compositor", "EnablePBO");
+ if (!sOpt) gf_cfg_set_key(compositor->user->config, "Compositor", "EnablePBO", "no");
+ compositor->enable_pbo = (sOpt && !strcmp(sOpt, "yes")) ? 1 : 0;
sOpt = gf_cfg_get_key(compositor->user->config, "Compositor", "DefaultNavigationMode");
if (sOpt && !strcmp(sOpt, "Walk")) compositor->default_navigation_mode = GF_NAVIGATE_WALK;
return e;
}
-u32 gf_sc_get_option(GF_Compositor *compositor, u32 type)
+/*returns 1 when no time-driven node (TimeSensor, MovieTexture, AudioClip, AnimationStream) registered
+with the compositor can still make the presentation evolve; when scene_graph is non-NULL, only nodes
+belonging to that graph are examined (per-scene "is over" query)*/
+Bool gf_sc_is_over(GF_Compositor *compositor, GF_SceneGraph *scene_graph)
{
- switch (type) {
- case GF_OPT_PLAY_STATE: return compositor->paused ? 1 : 0;
- case GF_OPT_OVERRIDE_SIZE: return (compositor->override_size_flags & 1) ? 1 : 0;
- case GF_OPT_IS_FINISHED:
- if (compositor->interaction_sensors) return 0;
- case GF_OPT_IS_OVER:
- {
- u32 i, count;
- count = gf_list_count(compositor->time_nodes);
- for (i=0; i<count; i++) {
- GF_TimeNode *tn = (GF_TimeNode *)gf_list_get(compositor->time_nodes, i);
- if (tn->needs_unregister) continue;
- switch (gf_node_get_tag((GF_Node *)tn->udta)) {
+ u32 i, count;
+ count = gf_list_count(compositor->time_nodes);
+ for (i=0; i<count; i++) {
+ GF_TimeNode *tn = (GF_TimeNode *)gf_list_get(compositor->time_nodes, i);
+ if (tn->needs_unregister) continue;
+
+ //restrict the scan to the requested scene graph, if any
+ if (scene_graph && (gf_node_get_graph((GF_Node *)tn->udta) != scene_graph))
+ continue;
+
+ switch (gf_node_get_tag((GF_Node *)tn->udta)) {
#ifndef GPAC_DISABLE_VRML
- case TAG_MPEG4_TimeSensor:
+ case TAG_MPEG4_TimeSensor:
#endif
#ifndef GPAC_DISABLE_X3D
- case TAG_X3D_TimeSensor:
+ case TAG_X3D_TimeSensor:
#endif
- return 0;
+ return 0;
#ifndef GPAC_DISABLE_VRML
- case TAG_MPEG4_MovieTexture:
+ case TAG_MPEG4_MovieTexture:
#ifndef GPAC_DISABLE_X3D
- case TAG_X3D_MovieTexture:
+ case TAG_X3D_MovieTexture:
#endif
- if (((M_MovieTexture *)tn->udta)->loop) return 0;
- break;
- case TAG_MPEG4_AudioClip:
+ if (((M_MovieTexture *)tn->udta)->loop) return 0;
+ break;
+ case TAG_MPEG4_AudioClip:
#ifndef GPAC_DISABLE_X3D
- case TAG_X3D_AudioClip:
+ case TAG_X3D_AudioClip:
#endif
- if (((M_AudioClip*)tn->udta)->loop) return 0;
- break;
- case TAG_MPEG4_AnimationStream:
- if (((M_AnimationStream*)tn->udta)->loop) return 0;
- break;
+ if (((M_AudioClip*)tn->udta)->loop) return 0;
+ break;
+ case TAG_MPEG4_AnimationStream:
+ if (((M_AnimationStream*)tn->udta)->loop) return 0;
+ break;
#endif
- }
}
}
- /*FIXME - this does not work with SVG/SMIL*/
- return 1;
+ /*FIXME - this does not work with SVG/SMIL*/
+ return 1;
+}
+
+u32 gf_sc_get_option(GF_Compositor *compositor, u32 type)
+{
+ switch (type) {
+ case GF_OPT_PLAY_STATE: return compositor->paused ? 1 : 0;
+ case GF_OPT_OVERRIDE_SIZE: return (compositor->override_size_flags & 1) ? 1 : 0;
+ case GF_OPT_IS_FINISHED:
+ if (compositor->interaction_sensors) return 0;
+ case GF_OPT_IS_OVER:
+ return gf_sc_is_over(compositor, NULL);
case GF_OPT_STRESS_MODE: return compositor->stress_mode;
case GF_OPT_AUDIO_VOLUME: return compositor->audio_renderer->volume;
case GF_OPT_AUDIO_PAN: return compositor->audio_renderer->pan;
}
#endif
+ gf_sc_ar_control(compositor->audio_renderer, 0);
#ifndef GPAC_DISABLE_3D
if (compositor->autoconfig_opengl) {
compositor->visual->type_3d = 1;
#endif
compositor->autoconfig_opengl = 0;
- //to change to "auto" once the GL auto mode is stable
-#if 0
+ //enable hybrid mode by default
if (compositor->visual->yuv_rect_glsl_program) {
- gf_cfg_set_key(compositor->user->config, "Compositor", "ForceOpenGL", "yes");
- compositor->force_opengl_2d = 1;
+ gf_cfg_set_key(compositor->user->config, "Compositor", "OpenGLMode", "hybrid");
+ compositor->force_opengl_2d = 0;
+ compositor->hybrid_opengl = 1;
} else {
- gf_cfg_set_key(compositor->user->config, "Compositor", "ForceOpenGL", "no");
+ gf_cfg_set_key(compositor->user->config, "Compositor", "OpenGLMode", "disable");
compositor->force_opengl_2d = 0;
compositor->visual->type_3d = 0;
}
-#endif
}
}
#ifndef GPAC_USE_OGL_ES
visual_3d_init_yuv_shader(compositor->visual);
#endif
- ra_init(&compositor->visual->hybgl_drawn);
+ if (!compositor->visual->hybgl_drawn.list) {
+ ra_init(&compositor->visual->hybgl_drawn);
+ }
}
#endif
}
+
+ gf_sc_ar_control(compositor->audio_renderer, 1);
+
gf_sc_next_frame_state(compositor, GF_SC_DRAW_NONE);
#ifndef GPAC_DISABLE_LOG
#endif
compositor_evaluate_envtests(compositor, 0);
+
+ //fullscreen was postponed, retry now that the AR has been recomputed
+ if (compositor->fullscreen_postponed) {
+ compositor->fullscreen_postponed = 0;
+ compositor->msg_type |= GF_SR_CFG_FULLSCREEN;
+ }
+
}
}
extern u32 time_spent_in_anim;
#endif
+/*per-frame texture housekeeping: releases each registered texture's stream data (required since one
+OD may feed several textures) and, when a frame was actually drawn, frees the HW state of textures
+that were not used in that frame; always clears the per-frame USED flag for the next pass*/
+static void compositor_release_textures(GF_Compositor *compositor, Bool frame_drawn)
+{
+ u32 i, count;
+ /*release all textures - we must release them to handle a same OD being used by several textures*/
+ count = gf_list_count(compositor->textures);
+ for (i=0; i<count; i++) {
+ GF_TextureHandler *txh = (GF_TextureHandler *)gf_list_get(compositor->textures, i);
+ gf_sc_texture_release_stream(txh);
+ //texture was not used in the drawn frame: drop its hardware resources
+ if (frame_drawn && txh->tx_io && !(txh->flags & GF_SR_TEXTURE_USED))
+ gf_sc_texture_reset(txh);
+ /*remove the use flag*/
+ txh->flags &= ~GF_SR_TEXTURE_USED;
+ }
+}
+
+
void gf_sc_simulation_tick(GF_Compositor *compositor)
{
GF_SceneGraph *sg;
+ GF_List *temp_queue;
u32 in_time, end_time, i, count;
Bool frame_drawn, has_timed_nodes=GF_FALSE, all_tx_done=GF_TRUE;
#ifndef GPAC_DISABLE_LOG
/*lock compositor for the whole cycle*/
gf_sc_lock(compositor, 1);
+ gf_sc_texture_cleanup_hw(compositor);
+
/*first thing to do, let the video output handle user event if it is not threaded*/
compositor->video_out->ProcessEvent(compositor->video_out, NULL);
if (compositor->freeze_display) {
gf_sc_lock(compositor, 0);
+ if (!compositor->bench_mode) {
+ compositor->scene_sampled_clock = gf_sc_ar_get_clock(compositor->audio_renderer);
+ }
if (!compositor->no_regulation) gf_sleep(compositor->frame_duration);
return;
}
visual_reset_graphics(compositor->visual);
}
-#ifdef GF_SR_EVENT_QUEUE
/*process pending user events*/
#ifndef GPAC_DISABLE_LOG
event_time = gf_sys_clock();
#endif
- gf_mx_p(compositor->ev_mx);
- while (gf_list_count(compositor->events)) {
- Bool ret;
- GF_Event *ev = (GF_Event*)gf_list_get(compositor->events, 0);
- gf_list_rem(compositor->events, 0);
- ret = gf_sc_exec_event(compositor, ev);
- gf_free(ev);
+ //swap event queus
+ gf_mx_p(compositor->evq_mx);
+ temp_queue = compositor->event_queue;
+ compositor->event_queue = compositor->event_queue_back;
+ compositor->event_queue_back = temp_queue;
+ gf_mx_v(compositor->evq_mx);
+ while (gf_list_count(compositor->event_queue_back)) {
+ GF_QueuedEvent *qev = (GF_QueuedEvent*)gf_list_get(compositor->event_queue_back, 0);
+ gf_list_rem(compositor->event_queue_back, 0);
+
+ if (qev->target) {
+#ifndef GPAC_DISABLE_SVG
+ gf_sg_fire_dom_event(qev->target, &qev->dom_evt, qev->sg, NULL);
+#endif
+ } else if (qev->node) {
+#ifndef GPAC_DISABLE_SVG
+ gf_dom_event_fire(qev->node, &qev->dom_evt);
+#endif
+ } else {
+ gf_sc_exec_event(compositor, &qev->evt);
+ }
+ gf_free(qev);
}
- gf_mx_v(compositor->ev_mx);
#ifndef GPAC_DISABLE_LOG
event_time = gf_sys_clock() - event_time;
#endif
-#elif !defined(GPAC_DISABLE_LOG)
- event_time = 0;
-#endif
-
+
+
+ if (!compositor->bench_mode) {
+ compositor->scene_sampled_clock = gf_sc_ar_get_clock(compositor->audio_renderer);
+ } else {
+ if (compositor->force_bench_frame==1) {
+ //a system frame is pending on a future frame - we must increase our time
+ compositor->scene_sampled_clock += compositor->frame_duration;
+ }
+ compositor->force_bench_frame = 0;
+ }
+
//first update all natural textures to figure out timing
compositor->frame_delay = (u32) -1;
gf_sc_lock(compositor, 0);
return;
}
+
+
#ifndef GPAC_DISABLE_LOG
texture_time = gf_sys_clock() - texture_time;
#endif
+ //this is correct but doesn't bring much and we may actually waste time while sleeping that could be used for texture upload - we prefer sleeping at the end of the pass
+#if 0
//if next video frame is due in this render cycle, wait until it matures
if ((compositor->frame_delay > 0) && (compositor->frame_delay != (u32) -1)) {
u32 diff=0;
- while (1) {
+ compositor->frame_delay = MIN(compositor->frame_delay, (s32) compositor->frame_duration);
+ while (!compositor->video_frame_pending) {
gf_sleep(0);
diff = gf_sys_clock() - in_time;
if (diff >= (u32) compositor->frame_delay)
break;
}
- GF_LOG(GF_LOG_DEBUG, GF_LOG_COMPOSE, ("[Compositor] Waited %d ms for next frame and %d ms was required\n", diff, compositor->frame_delay));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("[Compositor] Waited %d ms for next frame and %d ms was required\n", diff, compositor->frame_delay));
if (compositor->next_frame_delay != (u32) -1) {
if (diff < compositor->next_frame_delay) compositor->next_frame_delay -= diff;
else compositor->next_frame_delay = 1;
}
}
+#endif
- if (!compositor->bench_mode) {
- compositor->scene_sampled_clock = gf_sc_ar_get_clock(compositor->audio_renderer);
- } else {
- if (compositor->force_bench_frame==1) {
- //a system frame is pending on a future frame - we must increase our time
- compositor->scene_sampled_clock += compositor->frame_duration;
- }
- compositor->force_bench_frame = 0;
- }
#ifndef GPAC_DISABLE_SVG
/*if invalidated, draw*/
if (compositor->frame_draw_type) {
GF_Window rc;
+ Bool textures_released = 0;
+
#ifndef GPAC_DISABLE_LOG
traverse_time = gf_sys_clock();
time_spent_in_anim = 0;
compositor->frame_draw_type = 0;
- GF_LOG(GF_LOG_DEBUG, GF_LOG_COMPOSE, ("[Compositor] Redrawing scene - OTB %d\n", compositor->scene_sampled_clock));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_COMPOSE, ("[Compositor] Redrawing scene - STB %d\n", compositor->scene_sampled_clock));
gf_sc_draw_scene(compositor);
#ifndef GPAC_DISABLE_LOG
traverse_time = gf_sys_clock() - traverse_time;
if(compositor->user->init_flags & GF_TERM_INIT_HIDE)
compositor->skip_flush = 1;
+ //if no overlays, release textures before flushing, otherwise we might loose time waiting for vsync
+ if (!compositor->visual->has_overlays) {
+ compositor_release_textures(compositor, frame_drawn);
+ textures_released = 1;
+ }
+
if (compositor->skip_flush!=1) {
+
//release compositor in case we have vsync
gf_sc_lock(compositor, 0);
rc.x = rc.y = 0;
} else {
compositor->skip_flush = 0;
}
+
#ifndef GPAC_DISABLE_LOG
flush_time = gf_sys_clock() - flush_time;
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("[Compositor] done flushing frame in %d ms\n", flush_time));
#endif
visual_2d_draw_overlays(compositor->visual);
compositor->last_had_overlays = compositor->visual->has_overlays;
+ if (!textures_released) {
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("[Compositor] Releasing textures after flush\n" ));
+ compositor_release_textures(compositor, frame_drawn);
+ }
+
if (compositor->stress_mode) {
gf_sc_next_frame_state(compositor, GF_SC_DRAW_FRAME);
gf_sc_reset_graphics(compositor);
}
compositor->reset_fonts = 0;
+
} else {
+
+ //frame not drawn, release textures
+ compositor_release_textures(compositor, frame_drawn);
+
#ifndef GPAC_DISABLE_LOG
traverse_time = 0;
time_spent_in_anim = 0;
}
compositor->reset_graphics = 0;
- /*release all textures - we must release them to handle a same OD being used by several textures*/
- count = gf_list_count(compositor->textures);
- for (i=0; i<count; i++) {
- GF_TextureHandler *txh = (GF_TextureHandler *)gf_list_get(compositor->textures, i);
- gf_sc_texture_release_stream(txh);
- if (frame_drawn && txh->tx_io && !(txh->flags & GF_SR_TEXTURE_USED))
- gf_sc_texture_reset(txh);
- /*remove the use flag*/
- txh->flags &= ~GF_SR_TEXTURE_USED;
- }
-
- GF_LOG(GF_LOG_DEBUG, GF_LOG_COMPOSE, ("[Compositor] Frame %sdrawn\n", frame_drawn ? "" : "not " ));
-
compositor->last_frame_time = gf_sys_clock();
end_time = compositor->last_frame_time - in_time;
//in bench mode we always increase the clock of the fixed target simulation rate - this needs refinement if video is used ...
compositor->scene_sampled_clock += compositor->frame_duration;
}
+ compositor->video_frame_pending=0;
gf_sc_lock(compositor, 0);
#if 0
//we have a pending frame, return asap - we could sleep until frames matures but this give weird regulation
if (compositor->next_frame_delay != (u32) -1) {
+ if (compositor->next_frame_delay>end_time) compositor->next_frame_delay-=end_time;
+ else compositor->next_frame_delay=0;
+
+ compositor->next_frame_delay = MIN(compositor->next_frame_delay, 2*compositor->frame_duration);
if (compositor->next_frame_delay>2) {
u32 diff=0;
- while (! compositor->msg_type) {
+ while (! compositor->msg_type && ! compositor->video_frame_pending) {
gf_sleep(1);
diff = gf_sys_clock() - in_time;
if (diff >= (u32) compositor->next_frame_delay)
static Bool gf_sc_handle_event_intern(GF_Compositor *compositor, GF_Event *event, Bool from_user)
{
-#ifdef GF_SR_EVENT_QUEUE
- GF_Event *ev;
-#else
Bool ret;
- u32 retry;
-#endif
if (compositor->term && (compositor->interaction_level & GF_INTERACT_INPUT_SENSOR) && (event->type<=GF_EVENT_MOUSEWHEEL)) {
GF_Event evt = *event;
return 0;
}
*/
-#ifdef GF_SR_EVENT_QUEUE
- switch (event->type) {
- case GF_EVENT_MOUSEMOVE:
- {
- u32 i, count;
- gf_mx_p(compositor->ev_mx);
- count = gf_list_count(compositor->events);
- for (i=0; i<count; i++) {
- ev = (GF_Event *)gf_list_get(compositor->events, i);
- if (ev->type == GF_EVENT_MOUSEMOVE) {
- ev->mouse = event->mouse;
- gf_mx_v(compositor->ev_mx);
- return 1;
- }
- }
- gf_mx_v(compositor->ev_mx);
- }
- default:
- ev = (GF_Event *)gf_malloc(sizeof(GF_Event));
- ev->type = event->type;
- if (event->type<=GF_EVENT_MOUSEWHEEL) {
- ev->mouse = event->mouse;
- } else if (event->type==GF_EVENT_TEXTINPUT) {
- ev->character = event->character;
- } else {
- ev->key = event->key;
- }
- gf_mx_p(compositor->ev_mx);
- gf_list_add(compositor->events, ev);
- gf_mx_v(compositor->ev_mx);
- break;
- }
- return 0;
-#else
-
- retry = 100;
- while (retry) {
- if (gf_mx_try_lock(compositor->mx))
- break;
- retry--;
- gf_sleep(0);
- if (!retry) {
- return GF_FALSE;
- }
- }
+ gf_mx_p(compositor->mx);
ret = gf_sc_exec_event(compositor, event);
gf_sc_lock(compositor, GF_FALSE);
- if (!from_user) {
- }
+// if (!from_user) { }
return ret;
-#endif
}
void gf_sc_traverse_subscene(GF_Compositor *compositor, GF_Node *inline_parent, GF_SceneGraph *subscene, void *rs)
return compositor->texture_from_decoder_memory;
}
-void gf_sc_get_av_caps(GF_Compositor *compositor, u32 *width, u32 *height, u32 *bpp, u32 *channels, u32 *sample_rate)
+void gf_sc_get_av_caps(GF_Compositor *compositor, u32 *width, u32 *height, u32 *display_bit_depth, u32 *audio_bpp, u32 *channels, u32 *sample_rate)
{
if (width) *width = compositor->video_out->max_screen_width;
if (height) *height = compositor->video_out->max_screen_height;
+ if (display_bit_depth) *display_bit_depth = compositor->video_out->max_screen_bpp ? compositor->video_out->max_screen_bpp : 8;
//to do
- if (bpp) *bpp = 8;
+ if (audio_bpp) *audio_bpp = 8;
if (channels) *channels = 0;
if (sample_rate) *sample_rate = 48000;
}
compositor->force_bench_frame = 2;
}
}
+
+/*flags the compositor that a decoded video frame is pending composition; the simulation loop
+tests this flag to break out of its inter-frame wait early (see the regulation loop condition)*/
+void gf_sc_set_video_pending_frame(GF_Compositor *compositor)
+{
+ compositor->video_frame_pending = GF_TRUE;
+}
+
+/*queues a DOM event to be fired on the given node from the compositor thread; if an event of the
+same type is already pending for that node it is overwritten rather than queued twice (coalescing
+per node/type pair). Thread-safe through evq_mx.
+NOTE(review): the event is copied by value - any pointers inside GF_DOM_Event must stay valid until
+dispatch in gf_sc_simulation_tick; confirm with callers*/
+void gf_sc_queue_dom_event(GF_Compositor *compositor, GF_Node *node, GF_DOM_Event *evt)
+{
+ u32 i, count;
+ GF_QueuedEvent *qev;
+ gf_mx_p(compositor->evq_mx);
+
+ count = gf_list_count(compositor->event_queue);
+ for (i=0; i<count; i++) {
+ qev = gf_list_get(compositor->event_queue, i);
+ if ((qev->node==node) && (qev->dom_evt.type==evt->type)) {
+ qev->dom_evt = *evt;
+ gf_mx_v(compositor->evq_mx);
+ return;
+ }
+ }
+ GF_SAFEALLOC(qev, GF_QueuedEvent);
+ qev->node = node;
+ qev->dom_evt = *evt;
+ gf_list_add(compositor->event_queue, qev);
+ gf_mx_v(compositor->evq_mx);
+}
+
+/*queues a DOM event for the given event target in the given scene graph, coalescing with any
+pending event of the same type on the same target/graph pair. Thread-safe through evq_mx.
+NOTE(review): as with gf_sc_queue_dom_event, the event is copied by value - embedded pointers
+must remain valid until the compositor dispatches it*/
+void gf_sc_queue_dom_event_on_target(GF_Compositor *compositor, GF_DOM_Event *evt, GF_DOMEventTarget *target, GF_SceneGraph *sg)
+{
+ u32 i, count;
+ GF_QueuedEvent *qev;
+ gf_mx_p(compositor->evq_mx);
+
+ count = gf_list_count(compositor->event_queue);
+ for (i=0; i<count; i++) {
+ qev = gf_list_get(compositor->event_queue, i);
+ if ((qev->target==target) && (qev->dom_evt.type==evt->type) && (qev->sg==sg) ) {
+ qev->dom_evt = *evt;
+ gf_mx_v(compositor->evq_mx);
+ return;
+ }
+ }
+
+ GF_SAFEALLOC(qev, GF_QueuedEvent);
+ qev->sg = sg;
+ qev->target = target;
+ qev->dom_evt = *evt;
+ gf_list_add(compositor->event_queue, qev);
+ gf_mx_v(compositor->evq_mx);
+}
+
+/*removes from the given event queue all pending events that reference the node being destroyed
+and/or any node belonging to the scene graph being destroyed (either argument may be NULL);
+prevents a queued event from firing on freed memory*/
+static void sc_cleanup_event_queue(GF_List *evq, GF_Node *node, GF_SceneGraph *sg)
+{
+ u32 i, count = gf_list_count(evq);
+ for (i=0; i<count; i++) {
+ Bool del = 0;
+ GF_QueuedEvent *qev = gf_list_get(evq, i);
+ if (qev->node) {
+ //only drop events aimed at the node being destroyed, not every node-targeted event
+ if (node && (qev->node==node))
+ del = 1;
+ if (sg && (gf_node_get_graph(qev->node)==sg))
+ del = 1;
+ }
+ //guard on sg: when destroying a single node (sg==NULL), plain input events (qev->sg==NULL) must survive
+ if (sg && (qev->sg==sg))
+ del = 1;
+ else if (qev->target && (qev->target->ptr_type == GF_DOM_EVENT_TARGET_NODE)) {
+ if (node && ((GF_Node *)qev->target->ptr==node))
+ del = 1;
+ if (sg && (gf_node_get_graph((GF_Node *)qev->target->ptr)==sg))
+ del = 1;
+ }
+
+ if (del) {
+ gf_list_rem(evq, i);
+ i--;
+ count--;
+ gf_free(qev);
+ }
+ }
+}
+
+/*called when a node or an entire scene graph is being destroyed: purges both event queues
+(front and back, since a queue swap may have occurred mid-frame) of any event referencing it*/
+void gf_sc_node_destroy(GF_Compositor *compositor, GF_Node *node, GF_SceneGraph *sg)
+{
+ gf_mx_p(compositor->evq_mx);
+ sc_cleanup_event_queue(compositor->event_queue, node, sg);
+ sc_cleanup_event_queue(compositor->event_queue_back, node, sg);
+ gf_mx_v(compositor->evq_mx);
+}
gf_mo_get_object_time(txh->stream, &ck);
if (ck>txh->last_frame_time) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_COMPOSE, ("[Compositor2D] Bliting frame (CTS %d) %d ms too late\n", txh->last_frame_time, ck - txh->last_frame_time ));
- }
+ }
if (txh->nb_frames==100) {
txh->nb_frames = 0;
txh->upload_time = 0;
txh->nb_frames ++;
txh->upload_time += push_time;
- GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("[2D Blitter] Blit texure (CTS %d) %d ms after due date - blit in %d ms - average push time %d ms\n", txh->last_frame_time, ck - txh->last_frame_time, push_time, txh->upload_time / txh->nb_frames));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("[2D Blitter] At %d Blit texture (CTS %d) %d ms after due date - blit in %d ms - average push time %d ms\n", ck, txh->last_frame_time, ck - txh->last_frame_time, push_time, txh->upload_time / txh->nb_frames));
}
#endif
#endif
video_src.video_buffer = txh->data;
if (txh->raw_memory) {
- video_src.u_ptr = txh->pU;
- video_src.v_ptr = txh->pV;
+ video_src.u_ptr = (char *) txh->pU;
+ video_src.v_ptr = (char *) txh->pV;
}
- if (overlay_type) {
- u32 push_time;
+ //overlay queing
+ if (overlay_type==2) {
+ GF_IRect o_rc;
+ GF_OverlayStack *ol, *first;
+
+ /*queue overlay in order*/
+ GF_SAFEALLOC(ol, GF_OverlayStack);
+ ol->ctx = ctx;
+ ol->dst = dst_wnd;
+ ol->src = src_wnd;
+ first = visual->overlays;
+ if (first) {
+ while (first->next) first = first->next;
+ first->next = ol;
+ } else {
+ visual->overlays = ol;
+ }
- if (overlay_type==2) {
- GF_IRect o_rc;
- GF_OverlayStack *ol, *first;
-
- /*queue overlay in order*/
- GF_SAFEALLOC(ol, GF_OverlayStack);
- ol->ctx = ctx;
- ol->dst = dst_wnd;
- ol->src = src_wnd;
- first = visual->overlays;
- if (first) {
- while (first->next) first = first->next;
- first->next = ol;
- } else {
- visual->overlays = ol;
- }
+ if (visual->center_coords) {
+ o_rc.x = dst_wnd.x - output_width/2;
+ o_rc.y = output_height/2- dst_wnd.y;
+ } else {
+ o_rc.x = dst_wnd.x;
+ o_rc.y = dst_wnd.y + dst_wnd.h;
+ }
- if (visual->center_coords) {
- o_rc.x = dst_wnd.x - output_width/2;
- o_rc.y = output_height/2- dst_wnd.y;
- } else {
- o_rc.x = dst_wnd.x;
- o_rc.y = dst_wnd.y + dst_wnd.h;
- }
+ o_rc.width = dst_wnd.w;
+ o_rc.height = dst_wnd.h;
+ visual->ClearSurface(visual, &o_rc, visual->compositor->video_out->overlay_color_key);
+ visual->has_overlays = GF_TRUE;
+ /*mark drawable as overlay*/
+ ctx->drawable->flags |= DRAWABLE_IS_OVERLAY;
- o_rc.width = dst_wnd.w;
- o_rc.height = dst_wnd.h;
- visual->ClearSurface(visual, &o_rc, visual->compositor->video_out->overlay_color_key);
- visual->has_overlays = GF_TRUE;
- /*mark drawable as overlay*/
- ctx->drawable->flags |= DRAWABLE_IS_OVERLAY;
+ /*prevents this context from being removed in direct draw mode by requesting a new one
+ but not allocating it*/
+ if (tr_state->immediate_draw)
+ visual_2d_get_drawable_context(visual);
+
+ return GF_TRUE;
+ }
+
+ //will pause clock if first HW load
+ gf_sc_texture_check_pause_on_first_load(txh);
+
+ if (overlay_type) {
+ u32 push_time;
- /*prevents this context from being removed in direct draw mode by requesting a new one
- but not allocating it*/
- if (tr_state->immediate_draw)
- visual_2d_get_drawable_context(visual);
- return GF_TRUE;
- }
/*top level overlay*/
if (flush_video) {
GF_Window rc;
push_time = gf_sys_clock();
e = visual->compositor->video_out->Blit(visual->compositor->video_out, &video_src, &src_wnd, &dst_wnd, 1);
+
if (!e) {
#ifndef GPAC_DISABLE_LOG
log_blit_times(txh, push_time);
/*mark drawable as overlay*/
ctx->drawable->flags |= DRAWABLE_IS_OVERLAY;
visual->has_overlays = GF_TRUE;
+
+ //will resume clock if first HW load
+ gf_sc_texture_check_pause_on_first_load(txh);
return GF_TRUE;
}
GF_LOG(GF_LOG_ERROR, GF_LOG_COMPOSE, ("[Compositor2D] Error during overlay blit - trying with soft one\n"));
if (!use_soft_stretch) {
u32 push_time = gf_sys_clock();
e = visual->compositor->video_out->Blit(visual->compositor->video_out, &video_src, &src_wnd, &dst_wnd, 0);
+
/*HW pb, try soft*/
if (e) {
use_soft_stretch = GF_TRUE;
}
#endif
}
+
+ //will resume clock if first HW load
+ gf_sc_texture_check_pause_on_first_load(txh);
+
if (use_soft_stretch) {
GF_VideoSurface backbuffer;
e = visual->compositor->video_out->LockBackBuffer(visual->compositor->video_out, &backbuffer, GF_TRUE);
} else {
GF_LOG(GF_LOG_ERROR, GF_LOG_COMPOSE, ("[Compositor2D] Cannot lock back buffer - Error %s\n", gf_error_to_string(e) ));
if (is_attached) visual_2d_init_raster(visual);
+
return GF_FALSE;
}
if (!visual->compositor->video_memory) {
}
}
visual->has_modif = GF_TRUE;
- if (is_attached) visual_2d_init_raster(visual);
+// if (is_attached) visual_2d_init_raster(visual);
return GF_TRUE;
}
if (compositor->was_system_memory != evt.setup.system_memory) changed = 1;
else if (old_vp_width != compositor->vp_width) changed=1;
else if (old_vp_height != compositor->vp_height) changed=1;
- else if (compositor->was_opengl != evt.setup.opengl_mode) changed=1;
+ else if (compositor->is_opengl != evt.setup.opengl_mode) changed=1;
if (changed) {
return e;
}
- compositor->was_opengl = evt.setup.opengl_mode;
+ compositor->is_opengl = evt.setup.opengl_mode;
compositor->was_system_memory = evt.setup.system_memory;
}
if (compositor->has_size_info) {
evt.setup.opengl_mode = 0;
#else
evt.setup.opengl_mode = 1;
+ compositor->is_opengl = 1;
#endif
if (compositor->video_out->ProcessEvent(compositor->video_out, &evt)<0) {
if (!alpha) alpha = GF_COL_A(asp->line_color);
/*texture is available in hw, use it - if blending, force using texture*/
- if (!gf_sc_texture_needs_reload(txh) || (alpha != 0xFF) || use_texture
+ if (use_texture || !gf_sc_texture_needs_reload(txh) || (alpha != 0xFF)
#ifdef GF_SR_USE_DEPTH
|| tr_state->depth_offset
#endif
GLDECL(void, glBindBuffer, (GLenum, GLuint ) )
GLDECL(void, glBufferData, (GLenum, int, void *, GLenum) )
GLDECL(void, glBufferSubData, (GLenum, int, int, void *) )
+GLDECL(void *, glMapBuffer, (GLenum, GLenum) )
+//per the OpenGL spec glUnmapBuffer returns GLboolean, not a pointer; a mismatched
+//return type in the loaded-function prototype is undefined behavior on some ABIs
+GLDECL(GLboolean, glUnmapBuffer, (GLenum) )
#endif //GL_VERSION_1_5
#define GL_STENCIL_BACK_REF 0x8CA3
#define GL_STENCIL_BACK_VALUE_MASK 0x8CA4
#define GL_STENCIL_BACK_WRITEMASK 0x8CA5
+#define GL_PIXEL_UNPACK_BUFFER_ARB 0x88EC
+#define GL_STREAM_DRAW_ARB 0x88E0
+#define GL_WRITE_ONLY_ARB 0x88B9
+#define GL_DYNAMIC_DRAW_ARB 0x88E8
GLDECL(GLuint, glCreateProgram, (void) )
if (!stack->txh.is_open) {
scene_time -= mt->startTime;
gf_sc_texture_play_from_to(&stack->txh, &mt->url, scene_time, -1, gf_mo_get_loop(stack->txh.stream, mt->loop), 0);
+ } else if (stack->first_frame_fetched) {
+ gf_mo_resume(stack->txh.stream);
}
gf_mo_set_speed(stack->txh.stream, mt->speed);
}
if (!txnode->isActive && st->first_frame_fetched) return;
/*when fetching the first frame disable resync*/
- gf_sc_texture_update_frame(txh, !txnode->isActive);
+ gf_sc_texture_update_frame(txh, 0);
if (txh->stream_finished) {
if (movietexture_get_loop(st, txnode)) {
gf_node_event_out_str(txh->owner, "duration_changed");
/*stop stream if needed*/
if (!txnode->isActive && txh->is_open) {
- gf_sc_texture_stop(txh);
+ gf_mo_pause(txh->stream);
/*make sure the refresh flag is not cleared*/
txh->needs_refresh = 1;
gf_sc_invalidate(txh->compositor, NULL);
stack->fetch_first_frame = 0;
if (!stack->txh.is_open)
gf_sc_texture_play(&stack->txh, &mt->url);
+ else
+ gf_mo_resume(stack->txh.stream);
}
return;
}
evt.prev_translate.x = stack->vp.x;
evt.prev_translate.y = stack->vp.y;
evt.type = GF_EVENT_VP_RESIZE;
- gf_scene_notify_event(scene, 0, NULL, &evt, GF_OK);
+ gf_scene_notify_event(scene, 0, NULL, &evt, GF_OK, GF_TRUE);
}
}
gf_node_dirty_clear(stack->graph->node, GF_SG_SVG_GEOMETRY_DIRTY);
}
+/*binds the SVG video texture to its media object without starting playback
+(GF_FALSE: no lock on the scene timeline); playback is requested separately*/
+static void svg_open_texture(SVG_video_stack *stack)
+{
+ gf_sc_texture_open(&stack->txh, &stack->txurl, GF_FALSE);
+}
static void svg_play_texture(SVG_video_stack *stack, SVGAllAttributes *atts)
{
DrawableContext *ctx;
SVGAllAttributes all_atts;
-
if (is_destroy) {
gf_sc_texture_destroy(&stack->txh);
gf_sg_mfurl_del(stack->txurl);
return;
}
-
/*TRAVERSE_DRAW is NEVER called in 3D mode*/
if (tr_state->traversing_mode==TRAVERSE_DRAW_2D) {
SVG_Draw_bitmap(tr_state);
GF_FieldInfo init_vis_info;
SVG_video_stack *stack = (SVG_video_stack *) gf_node_get_private(txh->owner);
- if (!txh->is_open) {
- SVG_InitialVisibility init_vis;
- if (stack->first_frame_fetched) return;
+ if (!txh->stream) {
+ svg_open_texture(stack);
- init_vis = SVG_INITIALVISIBILTY_WHENSTARTED;
+ if (!txh->is_open) {
+ SVG_InitialVisibility init_vis;
+ if (stack->first_frame_fetched) return;
- if (gf_node_get_attribute_by_tag(txh->owner, TAG_SVG_ATT_initialVisibility, GF_FALSE, GF_FALSE, &init_vis_info) == GF_OK) {
- init_vis = *(SVG_InitialVisibility *)init_vis_info.far_ptr;
- }
+ init_vis = SVG_INITIALVISIBILTY_WHENSTARTED;
+
+ if (gf_node_get_attribute_by_tag(txh->owner, TAG_SVG_ATT_initialVisibility, GF_FALSE, GF_FALSE, &init_vis_info) == GF_OK) {
+ init_vis = *(SVG_InitialVisibility *)init_vis_info.far_ptr;
+ }
- /*opens stream only at first access to fetch first frame if needed*/
- if (init_vis == SVG_INITIALVISIBILTY_ALWAYS) {
- svg_play_texture((SVG_video_stack*)stack, NULL);
- gf_sc_invalidate(txh->compositor, NULL);
+ /*opens stream only at first access to fetch first frame if needed*/
+ if (init_vis == SVG_INITIALVISIBILTY_ALWAYS) {
+ svg_play_texture((SVG_video_stack*)stack, NULL);
+ gf_sc_invalidate(txh->compositor, NULL);
+ }
}
return;
}
}
GF_EXPORT
-GF_Err gf_sc_texture_play_from_to(GF_TextureHandler *txh, MFURL *url, Double start_offset, Double end_offset, Bool can_loop, Bool lock_scene_timeline)
+/*registers the media object designated by url with the texture without issuing a play request
+(playback is started separately, e.g. by gf_sc_texture_play_from_to); returns GF_BAD_PARAM if
+the texture is already open, GF_NOT_SUPPORTED on a bad/empty URL*/
+GF_Err gf_sc_texture_open(GF_TextureHandler *txh, MFURL *url, Bool lock_scene_timeline)
{
if (txh->is_open) return GF_BAD_PARAM;
txh->stream = gf_mo_register(txh->owner, url, lock_scene_timeline, 0);
/*bad/Empty URL*/
if (!txh->stream) return GF_NOT_SUPPORTED;
+
+ return GF_OK;
+}
+
+GF_EXPORT
+GF_Err gf_sc_texture_play_from_to(GF_TextureHandler *txh, MFURL *url, Double start_offset, Double end_offset, Bool can_loop, Bool lock_scene_timeline)
+{
+ if (!txh->stream) {
+ GF_Err e;
+ e = gf_sc_texture_open(txh, url, lock_scene_timeline);
+ if (e != GF_OK) return e;
+ }
+
/*request play*/
gf_mo_play(txh->stream, start_offset, end_offset, can_loop);
void gf_sc_texture_update_frame(GF_TextureHandler *txh, Bool disable_resync)
{
Bool needs_reload = 0;
- u32 size, ts, ms_until_next;
- s32 ms_until_pres;
+ u32 size, ts;
+ s32 ms_until_pres, ms_until_next;
/*already refreshed*/
if (txh->needs_refresh) return;
gf_mo_release_data(txh->stream, 0xFFFFFFFF, 0);
txh->needs_release = 0;
if (!txh->stream_finished) {
- if (txh->compositor->next_frame_delay > ms_until_next)
+ if (ms_until_next>0 && (txh->compositor->next_frame_delay > (u32) ms_until_next))
txh->compositor->next_frame_delay = ms_until_next;
}
return;
Bool gf_sc_texture_is_transparent(GF_TextureHandler *txh);
+void gf_sc_texture_check_pause_on_first_load(GF_TextureHandler *txh);
+
/*ALL THE FOLLOWING ARE ONLY AVAILABLE IN 3D AND DEAL WITH OPENGL TEXTURE MANAGEMENT*/
#ifndef GPAC_DISABLE_3D
TX_NEEDS_RASTER_LOAD = (1<<1),
/*signal video data must be sent to 3D hw*/
TX_NEEDS_HW_LOAD = (1<<2),
+
/*OpenGL texturing flags*/
/*these 4 are exclusives*/
- TX_MUST_SCALE = (1<<3),
- TX_IS_POW2 = (1<<4),
- TX_IS_RECT = (1<<5),
- TX_EMULE_POW2 = (1<<6),
- TX_EMULE_FIRST_LOAD = (1<<7),
+ TX_MUST_SCALE = (1<<10),
+ TX_IS_POW2 = (1<<11),
+ TX_IS_RECT = (1<<12),
+ TX_EMULE_POW2 = (1<<13),
+ TX_EMULE_FIRST_LOAD = (1<<14),
- TX_IS_FLIPPED = (1<<8),
+ TX_IS_FLIPPED = (1<<15),
};
/*2D texturing*/
GF_STENCIL tx_raster;
+ //0: not paused, 1: paused, 2: initial pause has been done
+ u32 init_pause_status;
/*3D texturing*/
#ifndef GPAC_DISABLE_3D
u32 nb_comp, gl_format, gl_type, gl_dtype;
Bool yuv_shader;
u32 v_id, u_id;
+ u32 pbo_id, u_pbo_id, v_pbo_id;
+ Bool pbo_pushed;
#endif
#ifdef GF_SR_USE_DEPTH
char *depth_data;
return GF_OK;
}
+static void release_txio(struct __texture_wrapper *tx_io)
+{
+
+#ifndef GPAC_DISABLE_3D
+ if (tx_io->id) glDeleteTextures(1, &tx_io->id);
+ if (tx_io->u_id) glDeleteTextures(1, &tx_io->u_id);
+ if (tx_io->v_id) glDeleteTextures(1, &tx_io->v_id);
+
+ if (tx_io->pbo_id) glDeleteBuffers(1, &tx_io->pbo_id);
+ if (tx_io->u_pbo_id) glDeleteBuffers(1, &tx_io->u_pbo_id);
+ if (tx_io->v_pbo_id) glDeleteBuffers(1, &tx_io->v_pbo_id);
+
+ if (tx_io->scale_data) gf_free(tx_io->scale_data);
+ if (tx_io->conv_data) gf_free(tx_io->conv_data);
+#endif
+
+#ifdef GF_SR_USE_DEPTH
+ if (tx_io->depth_data) gf_free(tx_io->depth_data);
+#endif
+
+ gf_free(tx_io);
+}
+
void gf_sc_texture_release(GF_TextureHandler *txh)
{
if (txh->vout_udta && txh->compositor->video_out->ReleaseTexture) {
txh->vout_udta = NULL;
}
- if (!txh->tx_io) return;
- if (txh->tx_io->tx_raster) {
- txh->compositor->rasterizer->stencil_delete(txh->tx_io->tx_raster);
- txh->tx_io->tx_raster = NULL;
- }
+ if (txh->tx_io) {
+ if (txh->tx_io->tx_raster) {
+ txh->compositor->rasterizer->stencil_delete(txh->tx_io->tx_raster);
+ txh->tx_io->tx_raster = NULL;
+ }
-#ifndef GPAC_DISABLE_3D
- if (txh->tx_io->id) glDeleteTextures(1, &txh->tx_io->id);
- if (txh->tx_io->u_id) glDeleteTextures(1, &txh->tx_io->u_id);
- if (txh->tx_io->v_id) glDeleteTextures(1, &txh->tx_io->v_id);
- if (txh->tx_io->scale_data) gf_free(txh->tx_io->scale_data);
- if (txh->tx_io->conv_data) gf_free(txh->tx_io->conv_data);
-#endif
+ if (gf_th_id()==txh->compositor->video_th_id) {
+ release_txio(txh->tx_io);
+ } else {
+ gf_list_add(txh->compositor->textures_gc, txh->tx_io);
+ }
+ txh->tx_io=NULL;
+ }
+}
-#ifdef GF_SR_USE_DEPTH
- if (txh->tx_io->depth_data) gf_free(txh->tx_io->depth_data);
-#endif
+void gf_sc_texture_cleanup_hw(GF_Compositor *compositor)
+{
+ while (gf_list_count(compositor->textures_gc)) {
+ struct __texture_wrapper *tx_io = (struct __texture_wrapper *) gf_list_last(compositor->textures_gc);
+ gf_list_rem_last(compositor->textures_gc);
- gf_free(txh->tx_io);
- txh->tx_io = NULL;
+ release_txio(tx_io);
+ }
}
+
+
GF_Err gf_sc_texture_set_data(GF_TextureHandler *txh)
{
txh->tx_io->flags |= TX_NEEDS_RASTER_LOAD | TX_NEEDS_HW_LOAD;
+
+#if !defined(GPAC_DISABLE_3D) && !defined(GPAC_USE_TINYGL) && !defined(GPAC_USE_OGL_ES)
+ //PBO mode: start pushing the texture
+ if (txh->tx_io->pbo_id) {
+ u8 *ptr;
+ u32 size = txh->stride*txh->height;
+
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, txh->tx_io->pbo_id);
+ ptr =(u8 *)glMapBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, GL_WRITE_ONLY_ARB);
+ if (ptr) memcpy(ptr, txh->data, size);
+ glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER_ARB);
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
+
+ if (txh->tx_io->u_pbo_id) {
+ u8 *pU = txh->pU;
+ u8 *pV = txh->pV;
+ if (!pU) pU = (u8 *) txh->data + size;
+ if (!pV) pV = (u8 *) txh->data + 5*size/4;
+
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, txh->tx_io->u_pbo_id);
+ ptr =(u8 *)glMapBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, GL_WRITE_ONLY_ARB);
+ if (ptr) memcpy(ptr, pU, size/4);
+ glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER_ARB);
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
+
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, txh->tx_io->v_pbo_id);
+ ptr =(u8 *)glMapBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, GL_WRITE_ONLY_ARB);
+ if (ptr) memcpy(ptr, pV, size/4);
+ glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER_ARB);
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
+ }
+
+ txh->tx_io->pbo_pushed = 1;
+
+ //we just pushed our texture to the GPU, release
+ if (txh->raw_memory) {
+ gf_sc_texture_release_stream(txh);
+ }
+ }
+#endif
return GF_OK;
}
#ifndef GPAC_DISABLE_3D
if (txh->tx_io->id) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_COMPOSE, ("[Texturing] Releasing OpenGL texture %d\n", txh->tx_io->id));
+
glDeleteTextures(1, &txh->tx_io->id);
txh->tx_io->id = 0;
if (txh->tx_io->u_id) {
glDeleteTextures(1, &txh->tx_io->v_id);
txh->tx_io->u_id = txh->tx_io->v_id = 0;
}
+ if (txh->tx_io->pbo_id) glDeleteBuffers(1, &txh->tx_io->pbo_id);
+ if (txh->tx_io->u_pbo_id) glDeleteBuffers(1, &txh->tx_io->u_pbo_id);
+ if (txh->tx_io->v_pbo_id) glDeleteBuffers(1, &txh->tx_io->v_pbo_id);
+ txh->tx_io->pbo_id = txh->tx_io->u_pbo_id = txh->tx_io->v_pbo_id = 0;
}
txh->tx_io->flags |= TX_NEEDS_HW_LOAD;
#endif
txh->compositor->visual->current_texture_glsl_program = 0;
glActiveTexture(GL_TEXTURE0);
glBindTexture(txh->tx_io->gl_type, 0);
+
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("[GL Texture] Texture drawn (CTS %d)\n", txh->last_frame_time));
+
}
#endif
glDisable(txh->tx_io->gl_type);
if (txh->transparent) glDisable(GL_BLEND);
+
+ gf_sc_texture_check_pause_on_first_load(txh);
}
}
tx_id[2] = txh->tx_io->v_id;
nb_tx = 3;
-
if (txh->tx_io->flags & TX_IS_RECT) {
GLint loc;
glUseProgram(compositor->visual->yuv_rect_glsl_program);
}
#endif
+#if !defined(GPAC_USE_TINYGL) && !defined(GPAC_USE_OGL_ES)
+ if (txh->compositor->gl_caps.pbo && txh->compositor->enable_pbo) {
+ u32 size = txh->stride*txh->height;
+
+ if (!txh->tx_io->pbo_id && txh->tx_io->id) {
+ glGenBuffers(1, &txh->tx_io->pbo_id);
+ if (txh->tx_io->pbo_id) {
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, txh->tx_io->pbo_id);
+ glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, size, NULL, GL_DYNAMIC_DRAW_ARB);
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
+ }
+ }
+ if (!txh->tx_io->u_pbo_id && txh->tx_io->u_id) {
+ glGenBuffers(1, &txh->tx_io->u_pbo_id);
+ if (txh->tx_io->u_pbo_id) {
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, txh->tx_io->u_pbo_id);
+ glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, size/4, NULL, GL_DYNAMIC_DRAW_ARB);
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
+ }
+ }
+ if (!txh->tx_io->v_pbo_id && txh->tx_io->v_id) {
+ glGenBuffers(1, &txh->tx_io->v_pbo_id);
+ if (txh->tx_io->v_pbo_id) {
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, txh->tx_io->v_pbo_id);
+ glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, size/4, NULL, GL_DYNAMIC_DRAW_ARB);
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
+ }
+ }
+ }
+#endif
+
if (use_yuv_shaders) {
//we use LUMINANCE because GL_RED is not defined on android ...
txh->tx_io->gl_format = GL_LUMINANCE;
glPixelTransferi(GL_RED_SCALE, 64);
#endif
} else {
+#ifndef GPAC_USE_OGL_ES
+ glPixelTransferi(GL_RED_SCALE, 1);
+#endif
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
}
glDisable(txh->tx_io->gl_type);
#ifndef GPAC_DISABLE_3D
-static void do_tex_image_2d(GF_TextureHandler *txh, GLint tx_mode, Bool first_load, u8 *data, u32 stride, u32 w, u32 h)
+static void do_tex_image_2d(GF_TextureHandler *txh, GLint tx_mode, Bool first_load, u8 *data, u32 stride, u32 w, u32 h, u32 pbo_id)
{
Bool needs_stride;
if (txh->tx_io->gl_dtype==GL_UNSIGNED_SHORT) {
needs_stride = (stride != 2*w*txh->tx_io->nb_comp) ? GF_TRUE : GF_FALSE;
+ if (needs_stride) stride /= 2;
} else {
needs_stride = (stride!=w*txh->tx_io->nb_comp) ? GF_TRUE : GF_FALSE;
}
if (needs_stride) {
#endif
+#if !defined(GPAC_USE_TINYGL) && !defined(GPAC_USE_OGL_ES)
+ if (txh->tx_io->pbo_pushed) {
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo_id);
+ glTexImage2D(txh->tx_io->gl_type, 0, tx_mode, w, h, 0, txh->tx_io->gl_format, txh->tx_io->gl_dtype, NULL);
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
+ }
+ else
+#endif
if (first_load) {
glTexImage2D(txh->tx_io->gl_type, 0, tx_mode, w, h, 0, txh->tx_io->gl_format, txh->tx_io->gl_dtype, data);
} else {
/*force setup of image*/
txh->needs_refresh = 1;
tx_setup_format(txh);
- first_load = 1;
+ txh->tx_io->flags |= TX_EMULE_FIRST_LOAD;
GF_LOG(GF_LOG_DEBUG, GF_LOG_COMPOSE, ("[Texturing] Allocating OpenGL texture %d\n", txh->tx_io->id));
}
if (!txh->tx_io->gl_type) return 0;
/*if data not yet ready don't push the texture*/
if (txh->data) {
-
- if (txh->tx_io->flags & TX_EMULE_FIRST_LOAD) {
- txh->tx_io->flags &= ~TX_EMULE_FIRST_LOAD;
- first_load = 1;
- }
-
/*convert image*/
gf_sc_texture_convert(txh);
}
txh->tx_io->flags &= ~TX_NEEDS_HW_LOAD;
data = gf_sc_texture_get_data(txh, &pixel_format);
if (!data) return 0;
+
+ if (txh->tx_io->flags & TX_EMULE_FIRST_LOAD) {
+ txh->tx_io->flags &= ~TX_EMULE_FIRST_LOAD;
+ first_load = 1;
+ }
+
if (txh->tx_io->flags & TX_EMULE_POW2) {
w = txh->tx_io->conv_w;
h = txh->tx_io->conv_h;
#else
+ gf_sc_texture_check_pause_on_first_load(txh);
+
/*pow2 texture or hardware support*/
if (! (txh->tx_io->flags & TX_MUST_SCALE) ) {
if (txh->tx_io->yuv_shader) {
u32 push_time;
u8 *pY, *pU, *pV;
u32 ck;
- pY = data;
+ pY = (u8 *) data;
if (txh->raw_memory) {
- if (!txh->pU || !txh->pV) return 0;
-
- pU = txh->pU;
- pV = txh->pV;
+ assert(txh->pU && txh->pV);
+ pU = (u8 *) txh->pU;
+ pV = (u8 *) txh->pV;
} else {
- pU = pY + txh->height*txh->stride;
- pV = pU + txh->height*txh->stride/4;
+ pU = (u8 *) pY + txh->height*txh->stride;
+ pV = (u8 *) pU + txh->height*txh->stride/4;
+ }
+
+#ifndef GPAC_USE_OGL_ES
+ if (txh->pixelformat==GF_PIXEL_YV12_10) {
+ glPixelStorei(GL_UNPACK_ALIGNMENT, 2);
+ //we use 10 bits but GL will normalize using 16 bits, so we need to multiply the normalized result by 2^6
+ glPixelTransferi(GL_RED_SCALE, 64);
}
+#endif
push_time = gf_sys_clock();
- do_tex_image_2d(txh, tx_mode, first_load, pY, txh->stride, w, h);
+ do_tex_image_2d(txh, tx_mode, first_load, pY, txh->stride, w, h, txh->tx_io->pbo_id);
+ GL_CHECK_ERR
glBindTexture(txh->tx_io->gl_type, txh->tx_io->u_id);
- do_tex_image_2d(txh, tx_mode, first_load, pU, txh->stride/2, w/2, h/2);
+ do_tex_image_2d(txh, tx_mode, first_load, pU, txh->stride/2, w/2, h/2, txh->tx_io->u_pbo_id);
+ GL_CHECK_ERR
glBindTexture(txh->tx_io->gl_type, txh->tx_io->v_id);
- do_tex_image_2d(txh, tx_mode, first_load, pV, txh->stride/2, w/2, h/2);
+ do_tex_image_2d(txh, tx_mode, first_load, pV, txh->stride/2, w/2, h/2, txh->tx_io->v_pbo_id);
+ GL_CHECK_ERR
push_time = gf_sys_clock() - push_time;
txh->nb_frames ++;
txh->upload_time += push_time;
+#ifndef GPAC_USE_OGL_ES
+ if (txh->pixelformat==GF_PIXEL_YV12_10) {
+ glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
+ glPixelTransferi(GL_RED_SCALE, 1);
+ }
+#endif
+
#ifndef GPAC_DISABLE_LOGS
gf_mo_get_object_time(txh->stream, &ck);
- GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("[GL Texture] Texure (CTS %d) %d ms after due date - Pushed Y,U,V texures in %d ms - average push time %d ms\n", txh->last_frame_time, ck - txh->last_frame_time, push_time, txh->upload_time / txh->nb_frames));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("[GL Texture] Texture (CTS %d) %d ms after due date - Pushed Y,U,V textures in %d ms - average push time %d ms (PBO enabled %s)\n", txh->last_frame_time, ck - txh->last_frame_time, push_time, txh->upload_time / txh->nb_frames, txh->tx_io->pbo_pushed ? "yes" : "no"));
#endif
- //we just pushed our texture to the GPU, release
- if (txh->raw_memory) {
- gf_sc_texture_release_stream(txh);
- }
+ txh->tx_io->pbo_pushed = 0;
} else {
- if (first_load) {
- glTexImage2D(txh->tx_io->gl_type, 0, tx_mode, w, h, 0, txh->tx_io->gl_format, txh->tx_io->gl_dtype, (unsigned char *) data);
- } else {
- glTexSubImage2D(txh->tx_io->gl_type, 0, 0, 0, w, h, txh->tx_io->gl_format, txh->tx_io->gl_dtype, (unsigned char *) data);
- }
+ do_tex_image_2d(txh, tx_mode, first_load, (u8 *) data, txh->stride, w, h, txh->tx_io->pbo_id);
+ txh->tx_io->pbo_pushed = 0;
}
} else {
}
}
#endif
+
return 1;
#endif
return ret;
}
#endif
- if (!txh || !txh->tx_io) return 0;
+ if (!txh || !txh->tx_io)
+ return 0;
if (txh->compute_gradient_matrix && gf_sc_texture_needs_reload(txh) ) {
compositor_gradient_update(txh);
/*use our program*/
Bool is_rect = txh->tx_io->flags & TX_IS_RECT;
compositor->visual->current_texture_glsl_program = is_rect ? compositor->visual->yuv_rect_glsl_program : compositor->visual->yuv_glsl_program;
+ GL_CHECK_ERR
glUseProgram(compositor->visual->current_texture_glsl_program);
+ GL_CHECK_ERR
glEnable(txh->tx_io->gl_type);
txh->tx_io->flags |= TX_NEEDS_HW_LOAD;
}
+void gf_sc_texture_check_pause_on_first_load(GF_TextureHandler *txh)
+{
+ if (txh->stream && txh->tx_io) {
+ switch (txh->tx_io->init_pause_status) {
+ case 0:
+ gf_sc_ar_control(txh->compositor->audio_renderer, 0);
+ txh->tx_io->init_pause_status = 1;
+ break;
+ case 1:
+ gf_sc_ar_control(txh->compositor->audio_renderer, 1);
+ txh->tx_io->init_pause_status = 2;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
* Visual Manager part for 3D drawing
*/
-#if defined( _LP64 ) && defined(CONFIG_DARWIN_GL)
+#if defined( _LP64 ) && 0 && defined(CONFIG_DARWIN_GL)
#define GF_SHADERID u64
#else
#define GF_SHADERID u32
u32 nb_views, current_view, autostereo_type, camera_layout;
Bool reverse_views;
+ GF_SHADERID base_glsl_vertex;
+
u32 *gl_textures;
u32 auto_stereo_width, auto_stereo_height;
GF_Mesh *autostereo_mesh;
- GF_SHADERID glsl_program;
- GF_SHADERID glsl_vertex;
- GF_SHADERID glsl_fragment;
+ GF_SHADERID autostereo_glsl_program;
+ GF_SHADERID autostereo_glsl_fragment;
GF_SHADERID yuv_glsl_program;
GF_SHADERID yuv_glsl_fragment;
#define ra_init(ra) { (ra)->count = 0; (ra)->alloc = RA_DEFAULT_STEP; (ra)->list = (GF_RectArrayEntry*)gf_malloc(sizeof(GF_RectArrayEntry)*(ra)->alloc); }
/*deletes structure - called as a destructor*/
-#define ra_del(ra) { if ((ra)->list) gf_free((ra)->list); }
+#define ra_del(ra) { if ((ra)->list) { gf_free((ra)->list); (ra)->list = NULL; } }
/*adds rect to list - expand if needed*/
GLDECL_STATIC(glBindBuffer);
GLDECL_STATIC(glBufferData);
GLDECL_STATIC(glBufferSubData);
+GLDECL_STATIC(glMapBuffer);
+GLDECL_STATIC(glUnmapBuffer);
#endif //LOAD_GL_1_5
#ifdef LOAD_GL_2_0
if (!has_gl_context) return;
- /*we have a GL context, get proc addresses*/
+ /*we have a GL context, init the rest (proc addresses & co)*/
+ glGetIntegerv(GL_MAX_TEXTURE_SIZE, &compositor->gl_caps.max_texture_size);
#ifdef LOAD_GL_1_3
if (CHECK_GL_EXT("GL_ARB_multitexture")) {
}
GET_GLFUN(glBlendEquation);
#endif
-
+
#ifdef LOAD_GL_1_4
if (compositor->gl_caps.point_sprite) {
GET_GLFUN(glPointParameterf);
GET_GLFUN(glBufferData);
GET_GLFUN(glBufferSubData);
}
+ if (CHECK_GL_EXT("GL_ARB_pixel_buffer_object")) {
+ GET_GLFUN(glMapBuffer);
+ GET_GLFUN(glUnmapBuffer);
+
+ compositor->gl_caps.pbo=1;
+ }
#endif
+
#ifdef LOAD_GL_2_0
GET_GLFUN(glCreateProgram);
#if !defined(GPAC_USE_TINYGL) && !defined(GPAC_USE_OGL_ES)
+
static char *default_glsl_vertex = "\
varying vec3 gfNormal;\
varying vec3 gfView;\
+ varying vec2 TexCoord;\
void main(void)\
{\
gfView = vec3(gl_ModelViewMatrix * gl_Vertex);\
gfNormal = normalize(gl_NormalMatrix * gl_Normal);\
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;\
- gl_TexCoord[0] = gl_MultiTexCoord0;\
+ TexCoord = gl_MultiTexCoord0.st;\
}";
#ifdef GPAC_UNUSED_FUNC
static char *default_glsl_lighting = "\
varying vec3 gfNormal;\
varying vec3 gfView;\
+ varying vec2 TexCoord;\
void gpac_lighting (void) \
{ \
vec3 L = normalize(gl_LightSource[0].position.xyz - gfView);\
static char *glsl_view_anaglyph = "\
uniform sampler2D gfView1;\
uniform sampler2D gfView2;\
+ varying vec2 TexCoord;\
void main(void) \
{\
- vec4 col1 = texture2D(gfView1, gl_TexCoord[0].st); \
- vec4 col2 = texture2D(gfView2, gl_TexCoord[0].st); \
+ vec4 col1 = texture2D(gfView1, TexCoord.st); \
+ vec4 col2 = texture2D(gfView2, TexCoord.st); \
gl_FragColor.r = col1.r;\
gl_FragColor.g = col2.g;\
gl_FragColor.b = col2.b;\
static char *glsl_view_anaglyph_optimize = "\
uniform sampler2D gfView1;\
uniform sampler2D gfView2;\
+ varying vec2 TexCoord;\
void main(void) \
{\
- vec4 col1 = texture2D(gfView1, gl_TexCoord[0].st); \
- vec4 col2 = texture2D(gfView2, gl_TexCoord[0].st); \
+ vec4 col1 = texture2D(gfView1, TexCoord.st); \
+ vec4 col2 = texture2D(gfView2, TexCoord.st); \
gl_FragColor.r = 0.7*col1.g + 0.3*col1.b;\
gl_FragColor.r = pow(gl_FragColor.r, 1.5);\
gl_FragColor.g = col2.g;\
static char *glsl_view_columns = "\
uniform sampler2D gfView1;\
uniform sampler2D gfView2;\
+ varying vec2 TexCoord;\
void main(void) \
{\
if ( int( mod(gl_FragCoord.x, 2.0) ) == 0) \
- gl_FragColor = texture2D(gfView1, gl_TexCoord[0].st); \
+ gl_FragColor = texture2D(gfView1, TexCoord.st); \
else \
- gl_FragColor = texture2D(gfView2, gl_TexCoord[0].st); \
+ gl_FragColor = texture2D(gfView2, TexCoord.st); \
}";
static char *glsl_view_rows = "\
uniform sampler2D gfView1;\
uniform sampler2D gfView2;\
+ varying vec2 TexCoord;\
void main(void) \
{\
if ( int( mod(gl_FragCoord.y, 2.0) ) == 0) \
- gl_FragColor = texture2D(gfView1, gl_TexCoord[0].st); \
+ gl_FragColor = texture2D(gfView1, TexCoord.st); \
else \
- gl_FragColor = texture2D(gfView2, gl_TexCoord[0].st); \
+ gl_FragColor = texture2D(gfView2, TexCoord.st); \
}";
static char *glsl_view_5VSP19 = "\
uniform sampler2D gfView3;\
uniform sampler2D gfView4;\
uniform sampler2D gfView5;\
+ varying vec2 TexCoord;\
void main(void) {\
vec4 color[5];\
- color[0] = texture2D(gfView5, gl_TexCoord[0].st);\
- color[1] = texture2D(gfView4, gl_TexCoord[0].st);\
- color[2] = texture2D(gfView3, gl_TexCoord[0].st);\
- color[3] = texture2D(gfView2, gl_TexCoord[0].st);\
- color[4] = texture2D(gfView1, gl_TexCoord[0].st);\
+ color[0] = texture2D(gfView5, TexCoord.st);\
+ color[1] = texture2D(gfView4, TexCoord.st);\
+ color[2] = texture2D(gfView3, TexCoord.st);\
+ color[3] = texture2D(gfView2, TexCoord.st);\
+ color[4] = texture2D(gfView1, TexCoord.st);\
float pitch = 5.0 + 1.0 - mod(gl_FragCoord.y , 5.0);\
int col = int( mod(pitch + 3.0 * (gl_FragCoord.x), 5.0 ) );\
int Vr = int(col);\
}";
static char *glsl_yuv_shader = "\
- uniform sampler2D y_plane;\
- uniform sampler2D u_plane;\
- uniform sampler2D v_plane;\
- uniform float alpha;\
- const vec3 offset = vec3(-0.0625, -0.5, -0.5);\
- const vec3 R_mul = vec3(1.164, 0.000, 1.596);\
- const vec3 G_mul = vec3(1.164, -0.391, -0.813);\
- const vec3 B_mul = vec3(1.164, 2.018, 0.000);\
- void main(void) \
- {\
- vec2 texc;\
- vec3 yuv, rgb;\
- texc = gl_TexCoord[0].st;\
- texc.y = 1.0 - texc.y;\
- yuv.x = texture2D(y_plane, texc).r; \
- yuv.y = texture2D(u_plane, texc).r; \
- yuv.z = texture2D(v_plane, texc).r; \
- yuv += offset; \
- rgb.r = dot(yuv, R_mul); \
- rgb.g = dot(yuv, G_mul); \
- rgb.b = dot(yuv, B_mul); \
- gl_FragColor = vec4(rgb, alpha);\
- }";
+ uniform sampler2D y_plane;\n\
+ uniform sampler2D u_plane;\n\
+ uniform sampler2D v_plane;\n\
+ uniform float alpha;\n\
+ varying vec2 TexCoord;\n\
+ const vec3 offset = vec3(-0.0625, -0.5, -0.5);\n\
+ const vec3 R_mul = vec3(1.164, 0.000, 1.596);\n\
+ const vec3 G_mul = vec3(1.164, -0.391, -0.813);\n\
+ const vec3 B_mul = vec3(1.164, 2.018, 0.000);\n\
+ void main(void) \n\
+ {\n\
+ vec2 texc;\n\
+ vec3 yuv, rgb;\n\
+ texc = TexCoord.st;\n\
+ texc.y = 1.0 - texc.y;\n\
+ yuv.x = texture2D(y_plane, texc).r;\n\
+ yuv.y = texture2D(u_plane, texc).r;\n\
+ yuv.z = texture2D(v_plane, texc).r;\n\
+ yuv += offset;\n\
+ rgb.r = dot(yuv, R_mul);\n\
+ rgb.g = dot(yuv, G_mul);\n\
+ rgb.b = dot(yuv, B_mul);\n\
+ gl_FragColor = vec4(rgb, alpha);\n\
+ }\n";
static char *glsl_yuv_rect_shader_strict = "\
#version 140\n\
#extension GL_ARB_texture_rectangle : enable\n\
- uniform sampler2DRect y_plane;\
- uniform sampler2DRect u_plane;\
- uniform sampler2DRect v_plane;\
- uniform float width;\
- uniform float height;\
- uniform float alpha;\
- const vec3 offset = vec3(-0.0625, -0.5, -0.5);\
- const vec3 R_mul = vec3(1.164, 0.000, 1.596);\
- const vec3 G_mul = vec3(1.164, -0.391, -0.813);\
- const vec3 B_mul = vec3(1.164, 2.018, 0.000);\
- out vec4 FragColor;\
- void main(void) \
- {\
- vec2 texc;\
- vec3 yuv, rgb;\
- texc = gl_TexCoord[0].st;\
- texc.y = 1.0 - texc.y;\
- texc.x *= width;\
- texc.y *= height;\
- yuv.x = texture2DRect(y_plane, texc).r; \
- texc.x /= 2.0;\
- texc.y /= 2.0;\
- yuv.y = texture2DRect(u_plane, texc).r; \
- yuv.z = texture2DRect(v_plane, texc).r; \
- yuv += offset; \
- rgb.r = dot(yuv, R_mul); \
- rgb.g = dot(yuv, G_mul); \
- rgb.b = dot(yuv, B_mul); \
- FragColor = vec4(rgb, alpha);\
- }";
+ uniform sampler2DRect y_plane;\n\
+ uniform sampler2DRect u_plane;\n\
+ uniform sampler2DRect v_plane;\n\
+ uniform float width;\n\
+ uniform float height;\n\
+ uniform float alpha;\n\
+ in vec2 TexCoord;\n\
+ const vec3 offset = vec3(-0.0625, -0.5, -0.5);\n\
+ const vec3 R_mul = vec3(1.164, 0.000, 1.596);\n\
+ const vec3 G_mul = vec3(1.164, -0.391, -0.813);\n\
+ const vec3 B_mul = vec3(1.164, 2.018, 0.000);\n\
+ out vec4 FragColor;\n\
+ void main(void) \n\
+ {\n\
+ vec2 texc;\n\
+ vec3 yuv, rgb;\n\
+ texc = TexCoord.st;\n\
+ texc.y = 1.0 - texc.y;\n\
+ texc.x *= width;\n\
+ texc.y *= height;\n\
+ yuv.x = texture2DRect(y_plane, texc).r;\n\
+ texc.x /= 2.0;\n\
+ texc.y /= 2.0;\n\
+ yuv.y = texture2DRect(u_plane, texc).r;\n\
+ yuv.z = texture2DRect(v_plane, texc).r;\n\
+ yuv += offset;\n\
+ rgb.r = dot(yuv, R_mul);\n\
+ rgb.g = dot(yuv, G_mul);\n\
+ rgb.b = dot(yuv, B_mul);\n\
+ FragColor = vec4(rgb, alpha);\n\
+ }\n";
static char *glsl_yuv_rect_shader_relaxed= "\
- uniform sampler2DRect y_plane;\
- uniform sampler2DRect u_plane;\
- uniform sampler2DRect v_plane;\
- uniform float width;\
- uniform float height;\
- uniform float alpha;\
- const vec3 offset = vec3(-0.0625, -0.5, -0.5);\
- const vec3 R_mul = vec3(1.164, 0.000, 1.596);\
- const vec3 G_mul = vec3(1.164, -0.391, -0.813);\
- const vec3 B_mul = vec3(1.164, 2.018, 0.000);\
- void main(void) \
- {\
- vec2 texc;\
- vec3 yuv, rgb;\
- texc = gl_TexCoord[0].st;\
- texc.y = 1.0 - texc.y;\
- texc.x *= width;\
- texc.y *= height;\
- yuv.x = texture2DRect(y_plane, texc).r; \
- texc.x /= 2.0;\
- texc.y /= 2.0;\
- yuv.y = texture2DRect(u_plane, texc).r; \
- yuv.z = texture2DRect(v_plane, texc).r; \
- yuv += offset; \
- rgb.r = dot(yuv, R_mul); \
- rgb.g = dot(yuv, G_mul); \
- rgb.b = dot(yuv, B_mul); \
- gl_FragColor = vec4(rgb, alpha);\
- }";
+ uniform sampler2DRect y_plane;\n\
+ uniform sampler2DRect u_plane;\n\
+ uniform sampler2DRect v_plane;\n\
+ uniform float width;\n\
+ uniform float height;\n\
+ uniform float alpha;\n\
+ varying vec2 TexCoord;\n\
+ const vec3 offset = vec3(-0.0625, -0.5, -0.5);\n\
+ const vec3 R_mul = vec3(1.164, 0.000, 1.596);\n\
+ const vec3 G_mul = vec3(1.164, -0.391, -0.813);\n\
+ const vec3 B_mul = vec3(1.164, 2.018, 0.000);\n\
+ void main(void) \n\
+ {\n\
+ vec2 texc;\n\
+ vec3 yuv, rgb;\n\
+ texc = TexCoord.st;\n\
+ texc.y = 1.0 - texc.y;\n\
+ texc.x *= width;\n\
+ texc.y *= height;\n\
+ yuv.x = texture2DRect(y_plane, texc).r;\n\
+ texc.x /= 2.0;\n\
+ texc.y /= 2.0;\n\
+ yuv.y = texture2DRect(u_plane, texc).r;\n\
+ yuv.z = texture2DRect(v_plane, texc).r;\n\
+ yuv += offset;\n\
+ rgb.r = dot(yuv, R_mul);\n\
+ rgb.g = dot(yuv, G_mul);\n\
+ rgb.b = dot(yuv, B_mul);\n\
+ gl_FragColor = vec4(rgb, alpha);\n\
+ }\n";
Bool visual_3d_compile_shader(GF_SHADERID shader_id, const char *name, const char *source)
{
GLint blen = 0;
GLsizei slen = 0;
- u32 len;
+ s32 len;
if (!source || !shader_id) return 0;
len = (u32) strlen(source);
glShaderSource(shader_id, 1, &source, &len);
glGetInfoLogARB(shader_id, blen, &slen, compiler_log);
#endif
GF_LOG(GF_LOG_ERROR, GF_LOG_COMPOSE, ("[GLSL] Failed to compile shader %s: %s\n", name, compiler_log));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_COMPOSE, ("[GLSL] ***** faulty shader code ****\n%s\n**********************\n", source));
gf_free (compiler_log);
return 0;
}
return 1;
}
+static GF_SHADERID visual_3d_shader_from_source_file(const char *src_path, u32 shader_type)
+{
+ FILE *src = gf_f64_open(src_path, "rt");
+ GF_SHADERID shader = 0;
+ if (src) {
+ size_t size;
+ char *shader_src;
+ gf_f64_seek(src, 0, SEEK_END);
+ size = (size_t) gf_f64_tell(src);
+ gf_f64_seek(src, 0, SEEK_SET);
+ shader_src = gf_malloc(sizeof(char)*(size+1));
+ size = fread(shader_src, 1, size, src);
+ fclose(src);
+ if (size != (size_t) -1) {
+ shader_src[size]=0;
+ shader = glCreateShader(shader_type);
+ if (visual_3d_compile_shader(shader, (shader_type == GL_FRAGMENT_SHADER) ? "fragment" : "vertex", shader_src)==GF_FALSE) {
+ glDeleteShader(shader);
+ shader = 0;
+ }
+ }
+ gf_free(shader_src);
+ } else {
+ GF_LOG(GF_LOG_ERROR, GF_LOG_COMPOSE, ("[Compositor] Failed to open shader file %s\n", src_path));
+ }
+ return shader;
+}
void visual_3d_init_stereo_shaders(GF_VisualManager *visual)
{
if (!visual->compositor->gl_caps.has_shaders) return;
- if (visual->glsl_program) return;
+ if (visual->autostereo_glsl_program) return;
- visual->glsl_program = glCreateProgram();
+ visual->autostereo_glsl_program = glCreateProgram();
- if (!visual->glsl_vertex) {
- visual->glsl_vertex = glCreateShader(GL_VERTEX_SHADER);
- visual_3d_compile_shader(visual->glsl_vertex, "vertex", default_glsl_vertex);
+ if (!visual->base_glsl_vertex) {
+ visual->base_glsl_vertex = glCreateShader(GL_VERTEX_SHADER);
+ visual_3d_compile_shader(visual->base_glsl_vertex, "vertex", default_glsl_vertex);
}
switch (visual->autostereo_type) {
case GF_3D_STEREO_COLUMNS:
- visual->glsl_fragment = glCreateShader(GL_FRAGMENT_SHADER);
- visual_3d_compile_shader(visual->glsl_fragment, "fragment", glsl_view_columns);
+ visual->autostereo_glsl_fragment = glCreateShader(GL_FRAGMENT_SHADER);
+ visual_3d_compile_shader(visual->autostereo_glsl_fragment, "fragment", glsl_view_columns);
break;
case GF_3D_STEREO_ROWS:
- visual->glsl_fragment = glCreateShader(GL_FRAGMENT_SHADER);
- visual_3d_compile_shader(visual->glsl_fragment, "fragment", glsl_view_rows);
+ visual->autostereo_glsl_fragment = glCreateShader(GL_FRAGMENT_SHADER);
+ visual_3d_compile_shader(visual->autostereo_glsl_fragment, "fragment", glsl_view_rows);
break;
case GF_3D_STEREO_ANAGLYPH:
- visual->glsl_fragment = glCreateShader(GL_FRAGMENT_SHADER);
- visual_3d_compile_shader(visual->glsl_fragment, "fragment", glsl_view_anaglyph);
+ visual->autostereo_glsl_fragment = glCreateShader(GL_FRAGMENT_SHADER);
+ visual_3d_compile_shader(visual->autostereo_glsl_fragment, "fragment", glsl_view_anaglyph);
break;
case GF_3D_STEREO_5VSP19:
- visual->glsl_fragment = glCreateShader(GL_FRAGMENT_SHADER);
- visual_3d_compile_shader(visual->glsl_fragment, "fragment", glsl_view_5VSP19);
+ visual->autostereo_glsl_fragment = glCreateShader(GL_FRAGMENT_SHADER);
+ visual_3d_compile_shader(visual->autostereo_glsl_fragment, "fragment", glsl_view_5VSP19);
break;
case GF_3D_STEREO_CUSTOM:
{
const char *sOpt = gf_cfg_get_key(visual->compositor->user->config, "Compositor", "InterleaverShader");
if (sOpt) {
- FILE *src = gf_f64_open(sOpt, "rt");
- if (src) {
- size_t size;
- char *shader_src;
- gf_f64_seek(src, 0, SEEK_END);
- size = (size_t) gf_f64_tell(src);
- gf_f64_seek(src, 0, SEEK_SET);
- shader_src = gf_malloc(sizeof(char)*(size+1));
- size = fread(shader_src, 1, size, src);
- fclose(src);
- if (size != (size_t) -1) {
- shader_src[size]=0;
- visual->glsl_fragment = glCreateShader(GL_FRAGMENT_SHADER);
- visual_3d_compile_shader(visual->glsl_fragment, "fragment", shader_src);
- }
- gf_free(shader_src);
- }
+ visual->autostereo_glsl_fragment = visual_3d_shader_from_source_file(sOpt, GL_FRAGMENT_SHADER);
}
}
break;
}
- glAttachShader(visual->glsl_program, visual->glsl_vertex);
- glAttachShader(visual->glsl_program, visual->glsl_fragment);
- glLinkProgram(visual->glsl_program);
+ glAttachShader(visual->autostereo_glsl_program, visual->base_glsl_vertex);
+ glAttachShader(visual->autostereo_glsl_program, visual->autostereo_glsl_fragment);
+ glLinkProgram(visual->autostereo_glsl_program);
}
#define DEL_SHADER(_a) if (_a) { glDeleteShader(_a); _a = 0; }
visual->yuv_glsl_program = glCreateProgram();
- if (!visual->glsl_vertex) {
- visual->glsl_vertex = glCreateShader(GL_VERTEX_SHADER);
- visual_3d_compile_shader(visual->glsl_vertex, "vertex", default_glsl_vertex);
+ if (!visual->base_glsl_vertex) {
+ visual->base_glsl_vertex = glCreateShader(GL_VERTEX_SHADER);
+ visual_3d_compile_shader(visual->base_glsl_vertex, "vertex", default_glsl_vertex);
}
visual->yuv_glsl_fragment = glCreateShader(GL_FRAGMENT_SHADER);
- visual_3d_compile_shader(visual->yuv_glsl_fragment, "fragment", glsl_yuv_shader);
+ visual_3d_compile_shader(visual->yuv_glsl_fragment, "YUV fragment", glsl_yuv_shader);
- glAttachShader(visual->yuv_glsl_program, visual->glsl_vertex);
+ glAttachShader(visual->yuv_glsl_program, visual->base_glsl_vertex);
glAttachShader(visual->yuv_glsl_program, visual->yuv_glsl_fragment);
glLinkProgram(visual->yuv_glsl_program);
}
glUniform1i(loc, i);
}
+ glUseProgram(0);
if (visual->compositor->gl_caps.rect_texture) {
Bool res;
const char *opt;
visual->yuv_rect_glsl_program = glCreateProgram();
- opt = gf_cfg_get_key(visual->compositor->user->config, "Compositor", "YUVShader");
+ opt = gf_cfg_get_key(visual->compositor->user->config, "Compositor", "YUVRectShader");
visual->yuv_rect_glsl_fragment = glCreateShader(GL_FRAGMENT_SHADER);
if (opt && !strcmp(opt, "Relaxed")) {
- res = visual_3d_compile_shader(visual->yuv_rect_glsl_fragment, "fragment", glsl_yuv_rect_shader_relaxed);
+ res = visual_3d_compile_shader(visual->yuv_rect_glsl_fragment, "YUV Rect fragment (relaxed syntax)", glsl_yuv_rect_shader_relaxed);
} else {
- res = visual_3d_compile_shader(visual->yuv_rect_glsl_fragment, "fragment", glsl_yuv_rect_shader_strict);
+ if (opt) {
+ visual->yuv_rect_glsl_fragment = visual_3d_shader_from_source_file(opt, GL_FRAGMENT_SHADER);
+ if (!visual->yuv_rect_glsl_fragment) res = GF_FALSE;
+ }
+ res = visual_3d_compile_shader(visual->yuv_rect_glsl_fragment, "YUV Rect fragment (strict syntax)", glsl_yuv_rect_shader_strict);
if (!res) {
- res = visual_3d_compile_shader(visual->yuv_rect_glsl_fragment, "fragment", glsl_yuv_rect_shader_relaxed);
+ res = visual_3d_compile_shader(visual->yuv_rect_glsl_fragment, "YUV Rect fragment (relaxed syntax)", glsl_yuv_rect_shader_relaxed);
if (res) {
- gf_cfg_set_key(visual->compositor->user->config, "Compositor", "YUVShader", "Relaxed");
- GF_LOG(GF_LOG_WARNING, GF_LOG_COMPOSE, ("[Compositor] Using relaxed syntaxed version of YUV shader\n"));
+ if (!opt) gf_cfg_set_key(visual->compositor->user->config, "Compositor", "YUVRectShader", "Relaxed");
+ GF_LOG(GF_LOG_WARNING, GF_LOG_COMPOSE, ("[Compositor] Using relaxed syntax version of YUV shader\n"));
}
+ } else {
+ GF_LOG(GF_LOG_INFO, GF_LOG_COMPOSE, ("[Compositor] Using strict syntax version of YUV shader\n"));
}
}
if (!res) {
}
if (visual->yuv_rect_glsl_program) {
- glAttachShader(visual->yuv_rect_glsl_program, visual->glsl_vertex);
+ glAttachShader(visual->yuv_rect_glsl_program, visual->base_glsl_vertex);
glAttachShader(visual->yuv_rect_glsl_program, visual->yuv_rect_glsl_fragment);
glLinkProgram(visual->yuv_rect_glsl_program);
}
glUniform1i(loc, i);
}
+
+ loc = glGetUniformLocation(visual->yuv_rect_glsl_program, "width");
+ if (loc == -1) {
+ GF_LOG(GF_LOG_ERROR, GF_LOG_COMPOSE, ("[Compositor] Failed to locate width in YUV shader\n"));
+ }
+
+ loc = glGetUniformLocation(visual->yuv_rect_glsl_program, "height");
+ if (loc == -1) {
+ GF_LOG(GF_LOG_ERROR, GF_LOG_COMPOSE, ("[Compositor] Failed to locate height in YUV shader\n"));
+ }
+
+ glUseProgram(0);
}
}
-
- glUseProgram(0);
}
#endif // !defined(GPAC_USE_TINYGL) && !defined(GPAC_USE_OGL_ES)
{
#if !defined(GPAC_USE_TINYGL) && !defined(GPAC_USE_OGL_ES)
- DEL_SHADER(visual->glsl_vertex);
- DEL_SHADER(visual->glsl_fragment);
+ DEL_SHADER(visual->base_glsl_vertex);
+ DEL_SHADER(visual->autostereo_glsl_fragment);
DEL_SHADER(visual->yuv_glsl_fragment);
- DEL_PROGRAM(visual->glsl_program );
+ DEL_PROGRAM(visual->autostereo_glsl_program );
DEL_PROGRAM(visual->yuv_glsl_program );
if (visual->gl_textures) {
glMatrixMode(GL_MODELVIEW);
/*use our program*/
- glUseProgram(visual->glsl_program);
+ glUseProgram(visual->autostereo_glsl_program);
/*push number of views if shader uses it*/
- loc = glGetUniformLocation(visual->glsl_program, "gfViewCount");
+ loc = glGetUniformLocation(visual->autostereo_glsl_program, "gfViewCount");
if (loc != -1) glUniform1i(loc, visual->nb_views);
glClientActiveTexture(GL_TEXTURE0);
/*bind all our textures*/
for (i=0; i<visual->nb_views; i++) {
sprintf(szTex, "gfView%d", i+1);
- loc = glGetUniformLocation(visual->glsl_program, szTex);
+ loc = glGetUniformLocation(visual->autostereo_glsl_program, szTex);
if (loc == -1) continue;
glActiveTexture(GL_TEXTURE0 + i);
Float z;
static GLfloat none[3] = { 1.0f, 0, 0 };
- data = gf_sc_texture_get_data(txh, &pixel_format);
+ data = (u8 *) gf_sc_texture_get_data(txh, &pixel_format);
if (!data) return;
if (pixel_format!=GF_PIXEL_RGBD) return;
stride = txh->stride;
if (!delta) first_pass = 2;
else first_pass = 1;
- data = gf_sc_texture_get_data(txh, &pixel_format);
+ data = (u8 *) gf_sc_texture_get_data(txh, &pixel_format);
if (!data) return;
if (pixel_format!=GF_PIXEL_RGBD) return;
stride = txh->stride;
in_strip = 0;
for (h=0; h<txh->height - 1; h++) {
- char *src = data + h*stride;
+ u8 *src = data + h*stride;
x = -1; x = gf_mulfix(x, INT2FIX(txh->width/2));
if (!tr_state->pixel_metrics) x = gf_divfix(x, tr_state->min_hsize);
Fixed f_scale = FLT2FIX(visual->compositor->depth_gl_scale);
txh->needs_refresh = 0;
- data = gf_sc_texture_get_data(txh, &pixel_format);
+ data = (u8 *) gf_sc_texture_get_data(txh, &pixel_format);
if (!data) return;
if (pixel_format!=GF_PIXEL_RGB_24_DEPTH) return;
data += txh->height*txh->width*3;
for (h=0; h<txh->height; h++) {
- char *src = data + h * txh->width;
+ u8 *src = data + h * txh->width;
for (w=0; w<txh->width; w++) {
u8 d = src[w];
Fixed z = INT2FIX(d);
#pragma comment (linker, EXPORT_SYMBOL(gf_rmdir) )
#pragma comment (linker, EXPORT_SYMBOL(gf_cleanup_dir) )
#pragma comment (linker, EXPORT_SYMBOL(gf_sys_clock) )
+#pragma comment (linker, EXPORT_SYMBOL(gf_sys_clock_high_res) )
#pragma comment (linker, EXPORT_SYMBOL(gf_sys_get_rti) )
#pragma comment (linker, EXPORT_SYMBOL(gf_sys_get_battery_state) )
#pragma comment (linker, EXPORT_SYMBOL(gf_get_default_cache_directory) )
/*color.h exports*/
#pragma comment (linker, EXPORT_SYMBOL(gf_stretch_bits) )
+#pragma comment (linker, EXPORT_SYMBOL(gf_color_write_yv12_10_to_yuv) )
#pragma comment (linker, EXPORT_SYMBOL(gf_cmx_init) )
#pragma comment (linker, EXPORT_SYMBOL(gf_cmx_set) )
#pragma comment (linker, EXPORT_SYMBOL(gf_cmx_set_all) )
#pragma comment (linker, EXPORT_SYMBOL(gf_isom_get_last_producer_time_box) )
#pragma comment (linker, EXPORT_SYMBOL(gf_isom_set_sync_table) )
#pragma comment (linker, EXPORT_SYMBOL(gf_isom_get_current_tfdt) )
+#pragma comment (linker, EXPORT_SYMBOL(gf_isom_is_adobe_protection_media) )
+#pragma comment (linker, EXPORT_SYMBOL(gf_isom_get_adobe_protection_info) )
# ifndef GPAC_DISABLE_ISOM_DUMP
#pragma comment (linker, EXPORT_SYMBOL(gf_isom_dump) )
#ifndef GPAC_DISABLE_ISOM_WRITE
#pragma comment (linker, EXPORT_SYMBOL(gf_media_change_par) )
+#pragma comment (linker, EXPORT_SYMBOL(gf_media_remove_non_rap) )
#endif
#ifndef GPAC_DISABLE_AV_PARSERS
#pragma comment (linker, EXPORT_SYMBOL(gf_media_hevc_read_pps) )
#pragma comment (linker, EXPORT_SYMBOL(gf_hevc_get_sps_info) )
#pragma comment (linker, EXPORT_SYMBOL(gf_hevc_get_profile_name) )
+#pragma comment (linker, EXPORT_SYMBOL(gf_hevc_get_sps_info_with_state) )
#endif //GPAC_DISABLE_HEVC
#pragma comment (linker, EXPORT_SYMBOL(gf_dom_listener_build) )
#pragma comment (linker, EXPORT_SYMBOL(gf_dom_listener_del) )
#pragma comment (linker, EXPORT_SYMBOL(gf_sg_handle_dom_event) )
-#pragma comment (linker, EXPORT_SYMBOL(sg_fire_dom_event) )
+#pragma comment (linker, EXPORT_SYMBOL(gf_sg_fire_dom_event) )
#pragma comment (linker, EXPORT_SYMBOL(gf_mo_event_target_get) )
#pragma comment (linker, EXPORT_SYMBOL(gf_mo_event_target_remove_by_node) )
#pragma comment (linker, EXPORT_SYMBOL(gf_event_target_get_node) )
#pragma comment (linker, EXPORT_SYMBOL(gf_dash_get_group_udta) )
#pragma comment (linker, EXPORT_SYMBOL(gf_dash_set_group_udta) )
#pragma comment (linker, EXPORT_SYMBOL(gf_dash_is_group_selected) )
+#pragma comment (linker, EXPORT_SYMBOL(gf_dash_is_group_selectable) )
+#pragma comment (linker, EXPORT_SYMBOL(gf_dash_debug_group) )
#pragma comment (linker, EXPORT_SYMBOL(gf_dash_get_info) )
#pragma comment (linker, EXPORT_SYMBOL(gf_dash_switch_quality) )
#pragma comment (linker, EXPORT_SYMBOL(gf_dash_get_duration) )
#pragma comment (linker, EXPORT_SYMBOL(gf_dash_set_max_resolution) )
#pragma comment (linker, EXPORT_SYMBOL(gf_dash_set_min_timeout_between_404) )
#pragma comment (linker, EXPORT_SYMBOL(gf_dash_set_segment_expiration_threshold) )
+#pragma comment (linker, EXPORT_SYMBOL(gf_dash_set_user_buffer) )
#endif
#ifndef GPAC_DISABLE_VTT
#pragma comment (linker, EXPORT_SYMBOL(gf_webvtt_js_addCue) )
#pragma comment (linker, EXPORT_SYMBOL(gf_webvtt_js_removeCues) )
#pragma comment (linker, EXPORT_SYMBOL(gf_webvtt_parse_cues_from_data) )
+#pragma comment (linker, EXPORT_SYMBOL(gf_webvtt_parse_iso_cues) )
+#pragma comment (linker, EXPORT_SYMBOL(gf_webvtt_cue_del) )
#endif
#ifndef GPAC_DISABLE_MSE
u32 nal_size = (u8) payload[offset]; nal_size<<=8; nal_size |= (u8) payload[offset+1];
offset += 2;
nal_type = (payload[offset] & 0x7E) >> 1;
- if ((nal_type==GF_HEVC_NALU_SLICE_IDR_W_DLP) || (nal_type==GF_HEVC_NALU_SLICE_IDR_N_LP)) {
+ if ((nal_type>=GF_HEVC_NALU_SLICE_BLA_W_LP) && (nal_type<=GF_HEVC_NALU_SLICE_CRA)) {
rtp->sl_hdr.randomAccessPointFlag = 1;
}
if (is_start) gf_rtp_hevc_flush(rtp, hdr, 1);
nal_type = payload[2] & 0x3F;
- if ((nal_type==GF_HEVC_NALU_SLICE_IDR_W_DLP) || (nal_type==GF_HEVC_NALU_SLICE_IDR_N_LP)) {
+ if ((nal_type>=GF_HEVC_NALU_SLICE_BLA_W_LP) && (nal_type<=GF_HEVC_NALU_SLICE_CRA)) {
rtp->sl_hdr.randomAccessPointFlag = 1;
}
else if (!stricmp(map->payload_name, "ac3")) return GF_RTP_PAYT_AC3;
else if (!stricmp(map->payload_name, "H264-SVC")) return GF_RTP_PAYT_H264_SVC;
else if (!stricmp(map->payload_name, "H265")) return GF_RTP_PAYT_HEVC;
+ else if (!stricmp(map->payload_name, "H265-SHVC")) return GF_RTP_PAYT_SHVC;
else return 0;
}
rtp->depacketize = gf_rtp_parse_h264;
break;
case GF_RTP_PAYT_HEVC:
+ case GF_RTP_PAYT_SHVC:
#ifndef GPAC_DISABLE_HEVC
{
GF_SDP_FMTP *fmtp;
case GF_RTP_PAYT_AC3:
return gp_rtp_builder_do_ac3(builder, data, data_size, IsAUEnd, FullAUSize);
case GF_RTP_PAYT_HEVC:
+ case GF_RTP_PAYT_SHVC:
return gp_rtp_builder_do_hevc(builder, data, data_size, IsAUEnd, FullAUSize);
default:
return GF_NOT_SUPPORTED;
strcpy(szMediaName, "video");
strcpy(szPayloadName, "H265");
return 1;
+ case GF_RTP_PAYT_SHVC:
+ strcpy(szMediaName, "video");
+ strcpy(szPayloadName, "H265-SHVC");
+ return 1;
default:
strcpy(szMediaName, "");
strcpy(szPayloadName, "");
if (!nalu) do_flush = 1;
else if (builder->sl_header.accessUnitStartFlag) do_flush = 1;
/*we must NOT fragment a NALU*/
- else if (builder->bytesInPacket + nalu_size + 4 >= builder->Path_MTU) do_flush = 2;
+ else if (builder->bytesInPacket + nalu_size + 4 >= builder->Path_MTU) do_flush = 2; //2 bytes PayloadHdr for AP + 2 bytes NAL size
/*aggregation is disabled*/
else if (! (builder->flags & GP_RTP_PCK_USE_MULTI) ) do_flush = 2;
char payload_hdr[2];
char shdr;
- assert(nalu_size>=builder->Path_MTU);
+ assert(nalu_size + 4 >=builder->Path_MTU);
assert(!builder->bytesInPacket);
/*FU payload doesn't have the NAL hdr (2 bytes*/
rtp_type = GF_RTP_PAYT_HEVC;
PL_ID = 0x0F;
break;
+ /*SHVC*/
+ case GPAC_OTI_VIDEO_SHVC:
+ required_rate = 90000; /* "90 kHz clock rate MUST be used"*/
+ rtp_type = GF_RTP_PAYT_SHVC;
+ PL_ID = 0x0F;
+ break;
}
break;
strcat(sdpLine, "\n");
}
}
- else if (rtp->packetizer->rtp_payt == GF_RTP_PAYT_HEVC) {
+ else if ((rtp->packetizer->rtp_payt == GF_RTP_PAYT_HEVC) || (rtp->packetizer->rtp_payt == GF_RTP_PAYT_SHVC)) {
#ifndef GPAC_DISABLE_HEVC
GF_HEVCConfig *hevcc = dsi ? gf_odf_hevc_cfg_read(dsi, dsi_len, 0) : NULL;
if (hevcc) {
}
}
-/* FIXME : unused function
-static void merge_nalus_list(GF_List *src, GF_List *dst)
-{
- u32 i, count = gf_list_count(src);
- for (i=0; i<count; i++) {
- void *p = gf_list_get(src, i);
- if (p) gf_list_add(dst, p);
- }
-}
-
-
-static void merge_nalus(GF_MPEGVisualSampleEntryBox *entry, GF_List *sps, GF_List *pps)
-{
- if (entry->avc_config) {
- merge_nalus_list(entry->avc_config->config->sequenceParameterSets, sps);
- merge_nalus_list(entry->avc_config->config->sequenceParameterSetExtensions, sps);
- merge_nalus_list(entry->avc_config->config->pictureParameterSets, pps);
- }
- if (entry->svc_config) {
- merge_nalus_list(entry->svc_config->config->sequenceParameterSets, sps);
- merge_nalus_list(entry->svc_config->config->pictureParameterSets, pps);
- }
-}*/
static GF_Err process_extractor(GF_ISOFile *file, u32 sampleNumber, u32 nal_size, u16 nal_hdr, u32 nal_unit_size_field, Bool is_hevc, Bool rewrite_ps, Bool rewrite_start_codes, GF_BitStream *src_bs, GF_BitStream *dst_bs, u32 extractor_mode)
{
if (!ref_track_num) return GF_ISOM_INVALID_FILE;
cur_extract_mode = gf_isom_get_nalu_extract_mode(file, ref_track_num);
+
+ //we must be in inspect mode only otherwise the reference sample will not be the one stored on file (change in start codes, PS inserted or other NALUs inserted)
+ //and this will corrupt extraction (wrong data offsets)
ref_extract_mode = GF_ISOM_NALU_EXTRACT_INSPECT;
- if (rewrite_ps)
- ref_extract_mode |= GF_ISOM_NALU_EXTRACT_INBAND_PS_FLAG;
gf_isom_set_nalu_extract_mode(file, ref_track_num, ref_extract_mode);
+
ref_trak = gf_isom_get_track_from_file(file, ref_track_num);
if (!ref_trak) return GF_ISOM_INVALID_FILE;
//are there cases were this wouldn't be the case ?
if (sample_offset < -sample_offset)
sample_offset = 0;
+
e = Media_GetSample(ref_trak->Media, sampleNumber + sample_offset, &ref_samp, &di, 0, NULL);
if (e) return e;
return GF_OK;
}
-static Bool is_sample_idr(GF_ISOSample *sample, GF_MPEGVisualSampleEntryBox *entry)
+static u8 is_sample_idr(GF_ISOSample *sample, GF_MPEGVisualSampleEntryBox *entry)
{
Bool is_hevc = 0;
u32 nalu_size_field = 0;
case GF_HEVC_NALU_SLICE_BLA_W_LP:
case GF_HEVC_NALU_SLICE_BLA_W_DLP:
case GF_HEVC_NALU_SLICE_BLA_N_LP:
+ case GF_HEVC_NALU_SLICE_CRA:
+ gf_bs_del(bs);
+ return 3;
case GF_HEVC_NALU_SLICE_IDR_W_DLP:
case GF_HEVC_NALU_SLICE_IDR_N_LP:
- case GF_HEVC_NALU_SLICE_CRA:
gf_bs_del(bs);
return 1;
case GF_HEVC_NALU_ACCESS_UNIT:
return 0;
}
-/* Rewrite mode:
- * mode = 0: playback
- * mode = 1: streaming
- */
+/*Writes all parameter sets stored in the sample entry into ps_bs.
+ HEVC path: dumps every NALU of every param_array in hevc_config then shvc_config.
+ AVC path: dumps SPS, SPS extensions and PPS from avc_config, then SPS/PPS from svc_config.
+ rewrite_start_codes / nal_unit_size_field are forwarded to rewrite_nalus_list to control
+ Annex-B start-code vs length-prefixed output.*/
+static void nalu_merge_ps(GF_BitStream *ps_bs, Bool rewrite_start_codes, u32 nal_unit_size_field, GF_MPEGVisualSampleEntryBox *entry, Bool is_hevc)
+{
+ u32 i, count;
+ if (is_hevc) {
+ if (entry->hevc_config) {
+ count = gf_list_count(entry->hevc_config->config->param_array);
+ for (i=0; i<count; i++) {
+ GF_HEVCParamArray *ar = gf_list_get(entry->hevc_config->config->param_array, i);
+ rewrite_nalus_list(ar->nalus, ps_bs, rewrite_start_codes, nal_unit_size_field);
+ }
+ }
+ if (entry->shvc_config) {
+ count = gf_list_count(entry->shvc_config->config->param_array);
+ for (i=0; i<count; i++) {
+ GF_HEVCParamArray *ar = gf_list_get(entry->shvc_config->config->param_array, i);
+ rewrite_nalus_list(ar->nalus, ps_bs, rewrite_start_codes, nal_unit_size_field);
+ }
+ }
+ } else {
+ if (entry->avc_config) {
+ rewrite_nalus_list(entry->avc_config->config->sequenceParameterSets, ps_bs, rewrite_start_codes, nal_unit_size_field);
+ rewrite_nalus_list(entry->avc_config->config->sequenceParameterSetExtensions, ps_bs, rewrite_start_codes, nal_unit_size_field);
+ rewrite_nalus_list(entry->avc_config->config->pictureParameterSets, ps_bs, rewrite_start_codes, nal_unit_size_field);
+ }
+
+ /*add svc config */
+ if (entry->svc_config) {
+ rewrite_nalus_list(entry->svc_config->config->sequenceParameterSets, ps_bs, rewrite_start_codes, nal_unit_size_field);
+ rewrite_nalus_list(entry->svc_config->config->pictureParameterSets, ps_bs, rewrite_start_codes, nal_unit_size_field);
+ }
+ }
+}
+
+
GF_Err gf_isom_nalu_sample_rewrite(GF_MediaBox *mdia, GF_ISOSample *sample, u32 sampleNumber, GF_MPEGVisualSampleEntryBox *entry)
{
Bool is_hevc = 0;
+ //if only one sync given in the sample sync table, insert sps/pps/vps before cra/bla in hevc
+ Bool check_cra_bla = (mdia->information->sampleTable->SyncSample && mdia->information->sampleTable->SyncSample->nb_entries>1) ? 0 : 1;
+ Bool insert_nalu_delim = 1;
GF_Err e = GF_OK;
GF_ISOSample *ref_samp;
GF_BitStream *src_bs, *ref_bs, *dst_bs, *ps_bs;
rewrite_start_codes = (mdia->mediaTrack->extractor_mode & GF_ISOM_NALU_EXTRACT_ANNEXB_FLAG) ? 1 : 0;
insert_vdrd_code = (mdia->mediaTrack->extractor_mode & GF_ISOM_NALU_EXTRACT_VDRD_FLAG) ? 1 : 0;
- if (!entry->svc_config) insert_vdrd_code = 0;
+ if (!entry->svc_config && !entry->shvc_config) insert_vdrd_code = 0;
extractor_mode = mdia->mediaTrack->extractor_mode&0x0000FFFF;
if (extractor_mode != GF_ISOM_NALU_EXTRACT_LAYER_ONLY)
insert_vdrd_code = 0;
+ //this is a compatible HEVC, don't insert VDRD, insert NALU delim
+ if (entry->shvc_config && entry->hevc_config)
+ insert_vdrd_code = 0;
+
if (extractor_mode == GF_ISOM_NALU_EXTRACT_INSPECT) {
if (!rewrite_ps && !rewrite_start_codes)
return GF_OK;
}
}
//AVC/HEVC base, insert NALU delim
- else {
+ else if (insert_nalu_delim) {
gf_bs_write_int(dst_bs, 1, 32);
if (is_hevc) {
#ifndef GPAC_DISABLE_HEVC
}
if (rewrite_ps) {
- if (is_hevc) {
- u32 i, count;
-
- if (entry->hevc_config) {
- count = gf_list_count(entry->hevc_config->config->param_array);
- for (i=0; i<count; i++) {
- GF_HEVCParamArray *ar = gf_list_get(entry->hevc_config->config->param_array, i);
- rewrite_nalus_list(ar->nalus, ps_bs, rewrite_start_codes, nal_unit_size_field);
- }
- }
- if (entry->shvc_config) {
- count = gf_list_count(entry->shvc_config->config->param_array);
- for (i=0; i<count; i++) {
- GF_HEVCParamArray *ar = gf_list_get(entry->shvc_config->config->param_array, i);
- rewrite_nalus_list(ar->nalus, ps_bs, rewrite_start_codes, nal_unit_size_field);
+ //in inspect mode or single-layer mode just use the xPS from this layer
+ if (extractor_mode == GF_ISOM_NALU_EXTRACT_DEFAULT) {
+ u32 i;
+ GF_TrackReferenceTypeBox *scal = NULL;
+ Track_FindRef(mdia->mediaTrack, GF_4CC('s','c','a','l'), &scal);
+
+ if (scal) {
+ for (i=0; i<scal->trackIDCount; i++) {
+ GF_TrackBox *a_track = GetTrackbyID(mdia->mediaTrack->moov, scal->trackIDs[i]);
+ GF_MPEGVisualSampleEntryBox *an_entry = NULL;
+ if (a_track && a_track->Media && a_track->Media->information && a_track->Media->information->sampleTable && a_track->Media->information->sampleTable->SampleDescription)
+ an_entry = gf_list_get(a_track->Media->information->sampleTable->SampleDescription->other_boxes, 0);
+
+ if (an_entry)
+ nalu_merge_ps(ps_bs, rewrite_start_codes, nal_unit_size_field, an_entry, is_hevc);
}
}
+ }
+ nalu_merge_ps(ps_bs, rewrite_start_codes, nal_unit_size_field, entry, is_hevc);
+
+ if (is_hevc) {
/*little optimization if we are not asked to start codes: copy over the sample*/
- if (!rewrite_start_codes) {
+ if (!rewrite_start_codes && !entry->shvc_config) {
if (ps_bs) {
u8 nal_type = (sample->data[nal_unit_size_field] & 0x7E) >> 1;
//temp fix - if we detect xPS in the begining of the sample do NOT copy the ps bitstream
gf_bs_del(dst_bs);
return GF_OK;
}
- } else {
- if (entry->avc_config) {
- rewrite_nalus_list(entry->avc_config->config->sequenceParameterSets, ps_bs, rewrite_start_codes, nal_unit_size_field);
- rewrite_nalus_list(entry->avc_config->config->sequenceParameterSetExtensions, ps_bs, rewrite_start_codes, nal_unit_size_field);
- rewrite_nalus_list(entry->avc_config->config->pictureParameterSets, ps_bs, rewrite_start_codes, nal_unit_size_field);
- }
-
- /*add svc config */
- if (entry->svc_config) {
- rewrite_nalus_list(entry->svc_config->config->sequenceParameterSets, ps_bs, rewrite_start_codes, nal_unit_size_field);
- rewrite_nalus_list(entry->svc_config->config->pictureParameterSets, ps_bs, rewrite_start_codes, nal_unit_size_field);
- }
}
}
case GF_HEVC_NALU_SLICE_STSA_R:
if (temporal_id < (nal_hdr & 0x7))
temporal_id = (nal_hdr & 0x7);
+
+ case GF_HEVC_NALU_SLICE_BLA_W_LP:
+ case GF_HEVC_NALU_SLICE_BLA_W_DLP:
+ case GF_HEVC_NALU_SLICE_BLA_N_LP:
+ case GF_HEVC_NALU_SLICE_IDR_W_DLP:
+ case GF_HEVC_NALU_SLICE_IDR_N_LP:
+ case GF_HEVC_NALU_SLICE_CRA:
+ //insert xPS before CRA/BLS
+ if (check_cra_bla && !sample->IsRAP) {
+ if (ref_samp) gf_isom_sample_del(&ref_samp);
+ if (src_bs) gf_bs_del(src_bs);
+ if (ref_bs) gf_bs_del(ref_bs);
+ if (dst_bs) gf_bs_del(dst_bs);
+ if (buffer) gf_free(buffer);
+
+ sample->IsRAP=3;
+ return gf_isom_nalu_sample_rewrite(mdia, sample, sampleNumber, entry);
+ }
default:
/*rewrite nal*/
gf_bs_read_data(src_bs, buffer, nal_size-2);
return cfg_new;
}
+/*Prepends all SPS and PPS of src_cfg into dst_cfg. Works on a duplicate of src_cfg
+ so the source config is left untouched; the duplicate shell is freed afterwards,
+ while the moved GF_AVCConfigSlot entries are now owned by dst_cfg.
+ NOTE(review): popping from index 0 and re-inserting at index 0 reverses the relative
+ order of src_cfg's parameter sets inside dst_cfg — confirm this is intended.*/
+static void merge_avc_config(GF_AVCConfig *dst_cfg, GF_AVCConfig *src_cfg)
+{
+ GF_AVCConfig *cfg = AVC_DuplicateConfig(src_cfg);
+ while (gf_list_count(cfg->sequenceParameterSets)) {
+ GF_AVCConfigSlot *p = (GF_AVCConfigSlot*)gf_list_get(cfg->sequenceParameterSets, 0);
+ gf_list_rem(cfg->sequenceParameterSets, 0);
+ gf_list_insert(dst_cfg->sequenceParameterSets, p, 0);
+ }
+ while (gf_list_count(cfg->pictureParameterSets)) {
+ GF_AVCConfigSlot *p = (GF_AVCConfigSlot*)gf_list_get(cfg->pictureParameterSets, 0);
+ gf_list_rem(cfg->pictureParameterSets, 0);
+ gf_list_insert(dst_cfg->pictureParameterSets, p, 0);
+ }
+ gf_odf_avc_cfg_del(cfg);
+}
+
+/*Merges src_cfg's parameter arrays into dst_cfg, again via a duplicate so src_cfg
+ is not modified. A param array whose type is absent from dst_cfg is moved wholesale
+ (removed from the duplicate so it is not double-freed by gf_odf_hevc_cfg_del below);
+ otherwise individual NALUs are moved into the matching destination array —
+ prepended when force_insert is set, appended otherwise.
+ Finally each scalar profile/level field of dst_cfg is raised to src_cfg's value
+ when lower (CHECK_CODE), so dst_cfg describes the superset.*/
+void merge_hevc_config(GF_HEVCConfig *dst_cfg, GF_HEVCConfig *src_cfg, Bool force_insert)
+{
+ GF_HEVCConfig *cfg = HEVC_DuplicateConfig(src_cfg);
+ //merge all xPS
+ u32 i, j, count = cfg->param_array ? gf_list_count(cfg->param_array) : 0;
+ for (i=0; i<count; i++) {
+ GF_HEVCParamArray *ar_h = NULL;
+ u32 count2 = dst_cfg->param_array ? gf_list_count(dst_cfg->param_array) : 0;
+ GF_HEVCParamArray *ar = gf_list_get(cfg->param_array, i);
+ //look for an array of the same NALU type in the destination config
+ for (j=0; j<count2; j++) {
+ ar_h = gf_list_get(dst_cfg->param_array, j);
+ if (ar_h->type==ar->type) {
+ break;
+ }
+ ar_h = NULL;
+ }
+ if (!ar_h) {
+ //no matching array: transfer ownership of the whole array to dst_cfg
+ gf_list_add(dst_cfg->param_array, ar);
+ gf_list_rem(cfg->param_array, i);
+ count--;
+ i--;
+ } else {
+ //matching array: move NALUs one by one
+ while (gf_list_count(ar->nalus)) {
+ GF_AVCConfigSlot *p = (GF_AVCConfigSlot*)gf_list_get(ar->nalus, 0);
+ gf_list_rem(ar->nalus, 0);
+ if (force_insert)
+ gf_list_insert(ar_h->nalus, p, 0);
+ else
+ gf_list_add(ar_h->nalus, p);
+ }
-void AVC_RewriteESDescriptor(GF_MPEGVisualSampleEntryBox *avc)
+ }
+ }
+ gf_odf_hevc_cfg_del(cfg);
+
+#define CHECK_CODE(__code) if (dst_cfg->__code < src_cfg->__code) dst_cfg->__code = src_cfg->__code;
+
+ CHECK_CODE(configurationVersion)
+ CHECK_CODE(profile_idc)
+ CHECK_CODE(profile_space)
+ CHECK_CODE(tier_flag)
+ CHECK_CODE(general_profile_compatibility_flags)
+ CHECK_CODE(progressive_source_flag)
+ CHECK_CODE(interlaced_source_flag)
+ CHECK_CODE(constraint_indicator_flags)
+ CHECK_CODE(level_idc)
+ CHECK_CODE(min_spatial_segmentation_idc)
+
+}
+
+/*Walks the track's 'scal' (scalability) references and merges the AVC/SVC and/or
+ HEVC/SHVC decoder configurations of each referenced track into the supplied
+ configs (either may be NULL to skip that codec family). Only the first sample
+ description entry of each referenced track is considered.
+ The resulting HEVC config is marked as plain HEVC (is_shvc cleared).*/
+void merge_all_config(GF_AVCConfig *avc_cfg, GF_HEVCConfig *hevc_cfg, GF_MediaBox *mdia)
+{
+ u32 i;
+ GF_TrackReferenceTypeBox *scal = NULL;
+ Track_FindRef(mdia->mediaTrack, GF_4CC('s','c','a','l'), &scal);
+
+ if (!scal) return;
+
+ for (i=0; i<scal->trackIDCount; i++) {
+ GF_TrackBox *a_track = GetTrackbyID(mdia->mediaTrack->moov, scal->trackIDs[i]);
+ GF_MPEGVisualSampleEntryBox *an_entry = NULL;
+ if (a_track && a_track->Media && a_track->Media->information && a_track->Media->information->sampleTable && a_track->Media->information->sampleTable->SampleDescription)
+ an_entry = gf_list_get(a_track->Media->information->sampleTable->SampleDescription->other_boxes, 0);
+
+ if (!an_entry) continue;
+
+ if (avc_cfg && an_entry->svc_config && an_entry->svc_config->config)
+ merge_avc_config(avc_cfg, an_entry->svc_config->config);
+
+ if (avc_cfg && an_entry->avc_config && an_entry->avc_config->config)
+ merge_avc_config(avc_cfg, an_entry->avc_config->config);
+
+ if (hevc_cfg && an_entry->shvc_config && an_entry->shvc_config->config)
+ merge_hevc_config(hevc_cfg, an_entry->shvc_config->config, GF_TRUE);
+
+ if (hevc_cfg && an_entry->hevc_config && an_entry->hevc_config->config)
+ merge_hevc_config(hevc_cfg, an_entry->hevc_config->config, GF_TRUE);
+ }
+
+ if (hevc_cfg) hevc_cfg->is_shvc = 0;
+}
+
+void AVC_RewriteESDescriptorEx(GF_MPEGVisualSampleEntryBox *avc, GF_MediaBox *mdia)
{
GF_AVCConfig *avcc, *svcc;
if (avc->emul_esd) gf_odf_desc_del((GF_Descriptor *)avc->emul_esd);
avcc = avc->avc_config->config ? AVC_DuplicateConfig(avc->avc_config->config) : NULL;
/*merge SVC config*/
if (avc->svc_config) {
- svcc = AVC_DuplicateConfig(avc->svc_config->config);
- while (gf_list_count(svcc->sequenceParameterSets)) {
- GF_AVCConfigSlot *p = (GF_AVCConfigSlot*)gf_list_get(svcc->sequenceParameterSets, 0);
- gf_list_rem(svcc->sequenceParameterSets, 0);
- gf_list_add(avcc->sequenceParameterSets, p);
- }
- while (gf_list_count(svcc->pictureParameterSets)) {
- GF_AVCConfigSlot *p = (GF_AVCConfigSlot*)gf_list_get(svcc->pictureParameterSets, 0);
- gf_list_rem(svcc->pictureParameterSets, 0);
- gf_list_add(avcc->pictureParameterSets, p);
- }
- gf_odf_avc_cfg_del(svcc);
+ merge_avc_config(avcc, avc->svc_config->config);
}
if (avcc) {
+ if (mdia) merge_all_config(avcc, NULL, mdia);
+
gf_odf_avc_cfg_write(avcc, &avc->emul_esd->decoderConfig->decoderSpecificInfo->data, &avc->emul_esd->decoderConfig->decoderSpecificInfo->dataLength);
gf_odf_avc_cfg_del(avcc);
}
} else if (avc->svc_config) {
svcc = AVC_DuplicateConfig(avc->svc_config->config);
+
+ if (mdia) merge_all_config(svcc, NULL, mdia);
+
gf_odf_avc_cfg_write(svcc, &avc->emul_esd->decoderConfig->decoderSpecificInfo->data, &avc->emul_esd->decoderConfig->decoderSpecificInfo->dataLength);
gf_odf_avc_cfg_del(svcc);
}
}
-void HEVC_RewriteESDescriptor(GF_MPEGVisualSampleEntryBox *hevc)
+/*Backward-compatible wrapper: rewrite the ESD without cross-track ('scal') config merging.*/
+void AVC_RewriteESDescriptor(GF_MPEGVisualSampleEntryBox *avc)
+{
+ AVC_RewriteESDescriptorEx(avc, NULL);
+}
+
+void HEVC_RewriteESDescriptorEx(GF_MPEGVisualSampleEntryBox *hevc, GF_MediaBox *mdia)
{
if (hevc->emul_esd) gf_odf_desc_del((GF_Descriptor *)hevc->emul_esd);
hevc->emul_esd = gf_odf_desc_esd_new(2);
GF_HEVCConfig *hcfg = HEVC_DuplicateConfig(hevc->hevc_config ? hevc->hevc_config->config : hevc->shvc_config->config);
if (hevc->hevc_config && hevc->shvc_config) {
- u32 j;
- GF_HEVCConfig *scfg = HEVC_DuplicateConfig(hevc->shvc_config->config);
- //merge all xPS
- u32 i, count = scfg->param_array ? gf_list_count(scfg->param_array) : 0;
- for (i=0; i<count; i++) {
- GF_HEVCParamArray *ar_h = NULL;
- u32 count2 = hcfg->param_array ? gf_list_count(hcfg->param_array) : 0;
- GF_HEVCParamArray *ar = gf_list_get(scfg->param_array, i);
- for (j=0; j<count2; j++) {
- ar_h = gf_list_get(hcfg->param_array, j);
- if (ar_h->type==ar->type) {
- break;
- }
- ar_h = NULL;
- }
- if (!ar_h) {
- gf_list_add(hcfg->param_array, ar);
- gf_list_rem(scfg->param_array, i);
- count--;
- i--;
- } else {
- gf_list_transfer(ar_h->nalus, ar->nalus);
- }
- }
- gf_odf_hevc_cfg_del(scfg);
+ //merge SHVC config to HEVC conf, so we add entry rather than insert
+ merge_hevc_config(hcfg, hevc->shvc_config->config, GF_FALSE);
}
+
+ if (mdia) merge_all_config(NULL, hcfg, mdia);
+
if (hcfg) {
+ hcfg->is_shvc = GF_FALSE;
gf_odf_hevc_cfg_write(hcfg, &hevc->emul_esd->decoderConfig->decoderSpecificInfo->data, &hevc->emul_esd->decoderConfig->decoderSpecificInfo->dataLength);
gf_odf_hevc_cfg_del(hcfg);
}
}
}
+/*Backward-compatible wrapper: rewrite the ESD without cross-track ('scal') config merging.*/
+void HEVC_RewriteESDescriptor(GF_MPEGVisualSampleEntryBox *hevc)
+{
+ HEVC_RewriteESDescriptorEx(hevc, NULL);
+}
GF_Err AVC_HEVC_UpdateESD(GF_MPEGVisualSampleEntryBox *avc, GF_ESD *esd)
{
}
}
- /*update GF_AVCConfig*/
- if (!avc->svc_config) {
- if (esd->decoderConfig->objectTypeIndication==GPAC_OTI_VIDEO_HEVC) {
- if (!avc->hevc_config) avc->hevc_config = (GF_HEVCConfigurationBox *)gf_isom_box_new(GF_ISOM_BOX_TYPE_HVCC);
- if (esd->decoderConfig->decoderSpecificInfo && esd->decoderConfig->decoderSpecificInfo->data) {
- if (avc->hevc_config->config) gf_odf_hevc_cfg_del(avc->hevc_config->config);
- avc->hevc_config->config = gf_odf_hevc_cfg_read(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, 0);
- }
- } else {
- if (!avc->avc_config) avc->avc_config = (GF_AVCConfigurationBox *)gf_isom_box_new(GF_ISOM_BOX_TYPE_AVCC);
- if (esd->decoderConfig->decoderSpecificInfo && esd->decoderConfig->decoderSpecificInfo->data) {
- if (avc->avc_config->config) gf_odf_avc_cfg_del(avc->avc_config->config);
- avc->avc_config->config = gf_odf_avc_cfg_read(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength);
- }
- }
+ if (!avc->shvc_config && (esd->decoderConfig->objectTypeIndication==GPAC_OTI_VIDEO_HEVC)) {
+ if (!avc->hevc_config) avc->hevc_config = (GF_HEVCConfigurationBox *)gf_isom_box_new(GF_ISOM_BOX_TYPE_HVCC);
+ if (esd->decoderConfig->decoderSpecificInfo && esd->decoderConfig->decoderSpecificInfo->data) {
+ if (avc->hevc_config->config) gf_odf_hevc_cfg_del(avc->hevc_config->config);
+ avc->hevc_config->config = gf_odf_hevc_cfg_read(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, 0);
+ }
}
+ else if (!avc->svc_config && (esd->decoderConfig->objectTypeIndication==GPAC_OTI_VIDEO_AVC)) {
+ if (!avc->avc_config) avc->avc_config = (GF_AVCConfigurationBox *)gf_isom_box_new(GF_ISOM_BOX_TYPE_AVCC);
+ if (esd->decoderConfig->decoderSpecificInfo && esd->decoderConfig->decoderSpecificInfo->data) {
+ if (avc->avc_config->config) gf_odf_avc_cfg_del(avc->avc_config->config);
+ avc->avc_config->config = gf_odf_avc_cfg_read(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength);
+ }
+ }
+
gf_odf_desc_del((GF_Descriptor *)esd);
if (avc->hevc_config) {
HEVC_RewriteESDescriptor(avc);
}
}
-static void sgpd_write_entry(u32 grouping_type, void *entry, GF_BitStream *bs)
+void sgpd_write_entry(u32 grouping_type, void *entry, GF_BitStream *bs)
{
switch (grouping_type) {
case GF_4CC( 'r', 'o', 'l', 'l' ):
}
}
-
GF_Box *sgpd_New()
{
ISOM_DECL_BOX_ALLOC(GF_SampleGroupDescriptionBox, GF_ISOM_BOX_TYPE_SGPD);
if (ptr->okms) gf_isom_box_del((GF_Box *)ptr->okms);
if (ptr->tenc) gf_isom_box_del((GF_Box *)ptr->tenc);
if (ptr->piff_tenc) gf_isom_box_del((GF_Box *)ptr->piff_tenc);
+ if (ptr->adkm) gf_isom_box_del((GF_Box *)ptr->adkm);
gf_free(ptr);
}
if (ptr->tenc) return GF_ISOM_INVALID_FILE;
ptr->tenc = (GF_TrackEncryptionBox *)a;
return GF_OK;
+ case GF_ISOM_BOX_TYPE_ADKM:
+ if (ptr->adkm) return GF_ISOM_INVALID_FILE;
+ ptr->adkm = (GF_AdobeDRMKeyManagementSystemBox *)a;
+ return GF_OK;
case GF_ISOM_BOX_TYPE_UUID:
if (((GF_UUIDBox*)a)->internal_4cc==GF_ISOM_BOX_UUID_TENC) {
if (ptr->piff_tenc) return GF_ISOM_INVALID_FILE;
e = gf_isom_box_write((GF_Box *) ptr->tenc, bs);
if (e) return e;
}
+ if (ptr->adkm) {
+ e = gf_isom_box_write((GF_Box *) ptr->adkm, bs);
+ if (e) return e;
+ }
if (ptr->piff_tenc) {
e = gf_isom_box_write((GF_Box *) ptr->piff_tenc, bs);
if (e) return e;
if (e) return e;
ptr->size += ptr->tenc->size;
}
+ if (ptr->adkm) {
+ e = gf_isom_box_size((GF_Box *) ptr->adkm);
+ if (e) return e;
+ ptr->size += ptr->adkm->size;
+ }
if (ptr->piff_tenc) {
e = gf_isom_box_size((GF_Box *) ptr->tenc);
if (e) return e;
for (i = 0; i < sample_count; i++) {
GF_CENCSampleAuxInfo *sai = (GF_CENCSampleAuxInfo *)gf_list_get(ptr->samp_aux_info, i);
if (! sai->IV_size) continue;
- gf_bs_write_data(bs, (char *)sai->IV, 16);
+ gf_bs_write_data(bs, (char *)sai->IV, sai->IV_size);
gf_bs_write_u16(bs, sai->subsample_count);
for (j = 0; j < sai->subsample_count; j++) {
gf_bs_write_u16(bs, sai->subsamples[j].bytes_clear_data);
}
#endif //GPAC_DISABLE_ISOM_WRITE
+/*creates the Adobe DRM Key Management System ('adkm') full box, version 1, flags 0*/
+GF_Box *adkm_New()
+{
+ ISOM_DECL_BOX_ALLOC(GF_AdobeDRMKeyManagementSystemBox, GF_ISOM_BOX_TYPE_ADKM);
+ tmp->version = 1;
+ tmp->flags = 0;
+ return (GF_Box *)tmp;
+}
+
+/*destroys an 'adkm' box and its child boxes ('ahdr' header, 'adaf' AU format)*/
+void adkm_del(GF_Box *s)
+{
+ GF_AdobeDRMKeyManagementSystemBox *ptr = (GF_AdobeDRMKeyManagementSystemBox *)s;
+ if (!ptr) return;
+ if (ptr->header) gf_isom_box_del((GF_Box *)ptr->header);
+ if (ptr->au_format) gf_isom_box_del((GF_Box *)ptr->au_format);
+ gf_free(s);
+}
+
+/*attaches a parsed child box to 'adkm': accepts one 'ahdr' and one 'adaf';
+duplicates are rejected as an invalid file, unknown children go to the default handler*/
+GF_Err adkm_AddBox(GF_Box *s, GF_Box *a)
+{
+ GF_AdobeDRMKeyManagementSystemBox *ptr = (GF_AdobeDRMKeyManagementSystemBox *)s;
+ switch (a->type) {
+ case GF_ISOM_BOX_TYPE_AHDR:
+ if (ptr->header) return GF_ISOM_INVALID_FILE;
+ ptr->header = (GF_AdobeDRMHeaderBox *)a;
+ break;
+ case GF_ISOM_BOX_TYPE_ADAF:
+ if (ptr->au_format) return GF_ISOM_INVALID_FILE;
+ ptr->au_format = (GF_AdobeDRMAUFormatBox *)a;
+ break;
+
+ default:
+ return gf_isom_box_add_default(s, a);
+ }
+ return GF_OK;
+}
+
+/*reads the 'adkm' full-box header then parses its child boxes via adkm_AddBox*/
+GF_Err adkm_Read(GF_Box *s, GF_BitStream *bs)
+{
+ GF_Err e;
+ e = gf_isom_full_box_read(s, bs);
+ if (e) return e;
+ return gf_isom_read_box_list(s, bs, adkm_AddBox);
+}
+
+#ifndef GPAC_DISABLE_ISOM_WRITE
+/*serializes the 'adkm' box: full-box header followed by 'ahdr' then 'adaf'
+NOTE(review): header/au_format are dereferenced without NULL checks - assumes both
+children were set (by parsing or gf_isom_set_adobe_protection); confirm callers guarantee this*/
+GF_Err adkm_Write(GF_Box *s, GF_BitStream *bs)
+{
+ GF_Err e;
+ GF_AdobeDRMKeyManagementSystemBox *ptr = (GF_AdobeDRMKeyManagementSystemBox *)s;
+ if (!s) return GF_BAD_PARAM;
+ e = gf_isom_full_box_write(s, bs);
+ if (e) return e;
+ //ahdr
+ e = gf_isom_box_write((GF_Box *) ptr->header, bs);
+ if (e) return e;
+ //adaf
+ e = gf_isom_box_write((GF_Box *) ptr->au_format, bs);
+ if (e) return e;
+
+ return GF_OK;
+}
+
+/*computes the 'adkm' box size: full-box header plus both children
+NOTE(review): like adkm_Write, assumes header and au_format are non-NULL*/
+GF_Err adkm_Size(GF_Box *s)
+{
+ GF_Err e;
+ GF_AdobeDRMKeyManagementSystemBox *ptr = (GF_AdobeDRMKeyManagementSystemBox *)s;
+ if (!s) return GF_BAD_PARAM;
+ e = gf_isom_full_box_get_size(s);
+ if (e) return e;
+ e = gf_isom_box_size((GF_Box *) ptr->header);
+ if (e) return e;
+ ptr->size += ptr->header->size;
+ e = gf_isom_box_size((GF_Box *) ptr->au_format);
+ if (e) return e;
+ ptr->size += ptr->au_format->size;
+ return GF_OK;
+}
+#endif //GPAC_DISABLE_ISOM_WRITE
+
+/*creates the Adobe DRM Header ('ahdr') full box, version 2, flags 0*/
+GF_Box *ahdr_New()
+{
+ ISOM_DECL_BOX_ALLOC(GF_AdobeDRMHeaderBox, GF_ISOM_BOX_TYPE_AHDR);
+ tmp->version = 2;
+ tmp->flags = 0;
+ return (GF_Box *)tmp;
+}
+
+/*destroys an 'ahdr' box and its 'aprm' child*/
+void ahdr_del(GF_Box *s)
+{
+ GF_AdobeDRMHeaderBox *ptr = (GF_AdobeDRMHeaderBox *)s;
+ if (!ptr) return;
+ if (ptr->std_enc_params) gf_isom_box_del((GF_Box *)ptr->std_enc_params);
+ gf_free(s);
+}
+
+
+/*attaches a parsed child box to 'ahdr': accepts a single 'aprm' (standard encryption params)*/
+GF_Err ahdr_AddBox(GF_Box *s, GF_Box *a)
+{
+ GF_AdobeDRMHeaderBox *ptr = (GF_AdobeDRMHeaderBox *)s;
+ switch (a->type) {
+ case GF_ISOM_BOX_TYPE_APRM:
+ if (ptr->std_enc_params) return GF_ISOM_INVALID_FILE;
+ ptr->std_enc_params = (GF_AdobeStdEncryptionParamsBox *)a;
+ break;
+
+ default:
+ return gf_isom_box_add_default(s, a);
+ }
+ return GF_OK;
+}
+
+/*reads the 'ahdr' full-box header then parses its child boxes via ahdr_AddBox*/
+GF_Err ahdr_Read(GF_Box *s, GF_BitStream *bs)
+{
+ GF_Err e;
+ e = gf_isom_full_box_read(s, bs);
+ if (e) return e;
+ return gf_isom_read_box_list(s, bs, ahdr_AddBox);
+}
+
+#ifndef GPAC_DISABLE_ISOM_WRITE
+/*serializes the 'ahdr' box: full-box header followed by its 'aprm' child
+NOTE(review): std_enc_params is dereferenced without a NULL check*/
+GF_Err ahdr_Write(GF_Box *s, GF_BitStream *bs)
+{
+ GF_Err e;
+ GF_AdobeDRMHeaderBox *ptr = (GF_AdobeDRMHeaderBox *)s;
+ if (!s) return GF_BAD_PARAM;
+ e = gf_isom_full_box_write(s, bs);
+ if (e) return e;
+ e = gf_isom_box_write((GF_Box *) ptr->std_enc_params, bs);
+ if (e) return e;
+
+ return GF_OK;
+}
+
+/*computes the 'ahdr' box size: full-box header plus the 'aprm' child*/
+GF_Err ahdr_Size(GF_Box *s)
+{
+ GF_Err e;
+ GF_AdobeDRMHeaderBox *ptr = (GF_AdobeDRMHeaderBox *)s;
+ if (!s) return GF_BAD_PARAM;
+ e = gf_isom_full_box_get_size(s);
+ if (e) return e;
+ e = gf_isom_box_size((GF_Box *) ptr->std_enc_params);
+ if (e) return e;
+ ptr->size += ptr->std_enc_params->size;
+ return GF_OK;
+}
+#endif //GPAC_DISABLE_ISOM_WRITE
+
+/*creates the Adobe Std Encryption Params ('aprm') full box, version 1, flags 0*/
+GF_Box *aprm_New()
+{
+ ISOM_DECL_BOX_ALLOC(GF_AdobeStdEncryptionParamsBox, GF_ISOM_BOX_TYPE_APRM);
+ tmp->version = 1;
+ tmp->flags = 0;
+ return (GF_Box *)tmp;
+}
+
+/*destroys an 'aprm' box and its children ('aeib' encryption info, 'akey' key info)*/
+void aprm_del(GF_Box *s)
+{
+ GF_AdobeStdEncryptionParamsBox *ptr = (GF_AdobeStdEncryptionParamsBox *)s;
+ if (!ptr) return;
+ if (ptr->enc_info) gf_isom_box_del((GF_Box *)ptr->enc_info);
+ if (ptr->key_info) gf_isom_box_del((GF_Box *)ptr->key_info);
+ gf_free(s);
+}
+
+/*attaches a parsed child box to 'aprm': accepts one 'aeib' (encryption info)
+and one 'akey' (key info); duplicates are an invalid file.
+Fix: the original matched GF_ISOM_BOX_TYPE_AHDR/ADAF (copy-paste from adkm_AddBox),
+so 'aeib'/'akey' children were never attached and an embedded 'ahdr' would be
+stored as an aeib with the wrong type - match the correct 4CCs instead.*/
+GF_Err aprm_AddBox(GF_Box *s, GF_Box *a)
+{
+ GF_AdobeStdEncryptionParamsBox *ptr = (GF_AdobeStdEncryptionParamsBox *)s;
+ switch (a->type) {
+ case GF_ISOM_BOX_TYPE_AEIB:
+ if (ptr->enc_info) return GF_ISOM_INVALID_FILE;
+ ptr->enc_info = (GF_AdobeEncryptionInfoBox *)a;
+ break;
+ case GF_ISOM_BOX_TYPE_AKEY:
+ if (ptr->key_info) return GF_ISOM_INVALID_FILE;
+ ptr->key_info = (GF_AdobeKeyInfoBox *)a;
+ break;
+
+ default:
+ return gf_isom_box_add_default(s, a);
+ }
+ return GF_OK;
+}
+
+/*reads the 'aprm' full-box header then parses its child boxes via aprm_AddBox*/
+GF_Err aprm_Read(GF_Box *s, GF_BitStream *bs)
+{
+ GF_Err e;
+ e = gf_isom_full_box_read(s, bs);
+ if (e) return e;
+ return gf_isom_read_box_list(s, bs, aprm_AddBox);
+}
+
+#ifndef GPAC_DISABLE_ISOM_WRITE
+/*serializes the 'aprm' box: full-box header, then 'aeib' and 'akey' children
+NOTE(review): enc_info/key_info are dereferenced without NULL checks*/
+GF_Err aprm_Write(GF_Box *s, GF_BitStream *bs)
+{
+ GF_Err e;
+ GF_AdobeStdEncryptionParamsBox *ptr = (GF_AdobeStdEncryptionParamsBox *)s;
+ if (!s) return GF_BAD_PARAM;
+ e = gf_isom_full_box_write(s, bs);
+ if (e) return e;
+ //aeib
+ e = gf_isom_box_write((GF_Box *) ptr->enc_info, bs);
+ if (e) return e;
+ //akey
+ e = gf_isom_box_write((GF_Box *) ptr->key_info, bs);
+ if (e) return e;
+
+ return GF_OK;
+}
+
+/*computes the 'aprm' box size: full-box header plus both children*/
+GF_Err aprm_Size(GF_Box *s)
+{
+ GF_Err e;
+ GF_AdobeStdEncryptionParamsBox *ptr = (GF_AdobeStdEncryptionParamsBox *)s;
+ if (!s) return GF_BAD_PARAM;
+ e = gf_isom_full_box_get_size(s);
+ if (e) return e;
+ e = gf_isom_box_size((GF_Box *) ptr->enc_info);
+ if (e) return e;
+ ptr->size += ptr->enc_info->size;
+ e = gf_isom_box_size((GF_Box *) ptr->key_info);
+ if (e) return e;
+ ptr->size += ptr->key_info->size;
+ return GF_OK;
+}
+#endif //GPAC_DISABLE_ISOM_WRITE
+
+/*creates the Adobe Encryption Info ('aeib') full box, version 1, flags 0*/
+GF_Box *aeib_New()
+{
+ ISOM_DECL_BOX_ALLOC(GF_AdobeEncryptionInfoBox, GF_ISOM_BOX_TYPE_AEIB);
+ tmp->version = 1;
+ tmp->flags = 0;
+ return (GF_Box *)tmp;
+}
+
+/*destroys an 'aeib' box and its owned algorithm string*/
+void aeib_del(GF_Box *s)
+{
+ GF_AdobeEncryptionInfoBox *ptr = (GF_AdobeEncryptionInfoBox*)s;
+ if (!ptr) return;
+ if (ptr->enc_algo) gf_free(ptr->enc_algo);
+ gf_free(ptr);
+}
+
+/*reads the 'aeib' payload: an algorithm string followed by a one-byte KeyLength.
+Fixes vs original: reject a zero-size payload (the unsigned "size - 1" would wrap
+to ~4GB), check the allocation, and force NUL-termination of enc_algo since
+aeib_Write/aeib_Size/aeib_dump all apply strlen()/%s to it.*/
+GF_Err aeib_Read(GF_Box *s, GF_BitStream *bs)
+{
+ GF_Err e;
+ GF_AdobeEncryptionInfoBox *ptr = (GF_AdobeEncryptionInfoBox*)s;
+ u32 len;
+
+ e = gf_isom_full_box_read(s, bs);
+ if (e) return e;
+
+ /*at least the KeyLength byte must be present*/
+ if (!ptr->size) return GF_ISOM_INVALID_FILE;
+ len = (u32) ptr->size - 1;
+ if (len) {
+ if (ptr->enc_algo) return GF_ISOM_INVALID_FILE;
+ /*one extra byte to guarantee NUL-termination*/
+ ptr->enc_algo = (char *)gf_malloc((len+1)*sizeof(char));
+ if (!ptr->enc_algo) return GF_OUT_OF_MEM;
+ gf_bs_read_data(bs, ptr->enc_algo, len);
+ ptr->enc_algo[len] = 0;
+ }
+ ptr->key_length = gf_bs_read_u8(bs);
+ ptr->size = 0;
+ return GF_OK;
+}
+
+#ifndef GPAC_DISABLE_ISOM_WRITE
+
+/*serializes the 'aeib' box: optional NUL-terminated algorithm string then KeyLength byte*/
+GF_Err aeib_Write(GF_Box *s, GF_BitStream *bs)
+{
+ GF_Err e;
+ GF_AdobeEncryptionInfoBox *ptr = (GF_AdobeEncryptionInfoBox *) s;
+ if (!s) return GF_BAD_PARAM;
+ e = gf_isom_full_box_write(s, bs);
+ if (e) return e;
+ if (ptr->enc_algo) {
+ gf_bs_write_data(bs, (char *) ptr->enc_algo, (u32) strlen(ptr->enc_algo));
+ gf_bs_write_u8(bs, 0); //string end
+ }
+ gf_bs_write_u8(bs, ptr->key_length);
+ return GF_OK;
+}
+
+/*computes the 'aeib' box size: algorithm string + its terminator (if set) + 1 KeyLength byte*/
+GF_Err aeib_Size(GF_Box *s)
+{
+ GF_Err e;
+ GF_AdobeEncryptionInfoBox *ptr = (GF_AdobeEncryptionInfoBox*)s;
+ e = gf_isom_full_box_get_size(s);
+ if (e) return e;
+ if (ptr->enc_algo)
+ ptr->size += strlen(ptr->enc_algo) + 1;
+ ptr->size += 1; //KeyLength
+ return GF_OK;
+}
+#endif //GPAC_DISABLE_ISOM_WRITE
+
+/*creates the Adobe Key Info ('akey') full box, version 1, flags 0*/
+GF_Box *akey_New()
+{
+ ISOM_DECL_BOX_ALLOC(GF_AdobeKeyInfoBox, GF_ISOM_BOX_TYPE_AKEY);
+ tmp->version = 1;
+ tmp->flags = 0;
+ return (GF_Box *)tmp;
+}
+
+/*destroys an 'akey' box and its 'flxs' child*/
+void akey_del(GF_Box *s)
+{
+ GF_AdobeKeyInfoBox *ptr = (GF_AdobeKeyInfoBox *)s;
+ if (!ptr) return;
+ if (ptr->params) gf_isom_box_del((GF_Box *)ptr->params);
+ gf_free(s);
+}
+
+/*attaches a parsed child box to 'akey': accepts a single 'flxs' (Flash Access params)*/
+GF_Err akey_AddBox(GF_Box *s, GF_Box *a)
+{
+ GF_AdobeKeyInfoBox *ptr = (GF_AdobeKeyInfoBox *)s;
+ switch (a->type) {
+ case GF_ISOM_BOX_TYPE_FLXS:
+ if (ptr->params) return GF_ISOM_INVALID_FILE;
+ ptr->params = (GF_AdobeFlashAccessParamsBox *)a;
+ break;
+ default:
+ return gf_isom_box_add_default(s, a);
+ }
+ return GF_OK;
+}
+
+/*reads the 'akey' full-box header then parses its child boxes via akey_AddBox*/
+GF_Err akey_Read(GF_Box *s, GF_BitStream *bs)
+{
+ GF_Err e;
+ e = gf_isom_full_box_read(s, bs);
+ if (e) return e;
+
+ return gf_isom_read_box_list(s, bs, akey_AddBox);
+}
+
+#ifndef GPAC_DISABLE_ISOM_WRITE
+/*serializes the 'akey' box: full-box header followed by its 'flxs' child
+NOTE(review): params is dereferenced without a NULL check*/
+GF_Err akey_Write(GF_Box *s, GF_BitStream *bs)
+{
+ GF_Err e;
+ GF_AdobeKeyInfoBox *ptr = (GF_AdobeKeyInfoBox *)s;
+ if (!s) return GF_BAD_PARAM;
+ e = gf_isom_full_box_write(s, bs);
+ if (e) return e;
+ e = gf_isom_box_write((GF_Box *) ptr->params, bs);
+ if (e) return e;
+
+ return GF_OK;
+}
+
+/*computes the 'akey' box size: full-box header plus the 'flxs' child.
+Fix: the original called gf_isom_box_size(ptr->params) a second time after
+accumulating the size, discarding the result - dead copy-paste code, removed.*/
+GF_Err akey_Size(GF_Box *s)
+{
+ GF_Err e;
+ GF_AdobeKeyInfoBox *ptr = (GF_AdobeKeyInfoBox *)s;
+ if (!s) return GF_BAD_PARAM;
+ e = gf_isom_full_box_get_size(s);
+ if (e) return e;
+ e = gf_isom_box_size((GF_Box *) ptr->params);
+ if (e) return e;
+ ptr->size += ptr->params->size;
+ return GF_OK;
+}
+#endif //GPAC_DISABLE_ISOM_WRITE
+
+/*creates the Adobe Flash Access Params ('flxs') box - a plain (non-full) box*/
+GF_Box *flxs_New()
+{
+ ISOM_DECL_BOX_ALLOC(GF_AdobeFlashAccessParamsBox, GF_ISOM_BOX_TYPE_FLXS);
+ return (GF_Box *)tmp;
+}
+
+/*destroys a 'flxs' box and its owned metadata string*/
+void flxs_del(GF_Box *s)
+{
+ GF_AdobeFlashAccessParamsBox *ptr = (GF_AdobeFlashAccessParamsBox*)s;
+ if (!ptr) return;
+ if (ptr->metadata)
+ gf_free(ptr->metadata);
+ gf_free(ptr);
+}
+
+/*reads the 'flxs' payload: the whole remaining box body is the FMRMS v2 metadata blob.
+Fixes vs original: check the allocation and force NUL-termination, since
+flxs_Write/flxs_Size/flxs_dump all apply strlen()/%s to metadata.*/
+GF_Err flxs_Read(GF_Box *s, GF_BitStream *bs)
+{
+ GF_AdobeFlashAccessParamsBox *ptr = (GF_AdobeFlashAccessParamsBox*)s;
+ u32 len;
+
+ len = (u32) ptr->size;
+ if (len) {
+ if (ptr->metadata) return GF_ISOM_INVALID_FILE;
+ /*one extra byte to guarantee NUL-termination*/
+ ptr->metadata = (char *)gf_malloc((len+1)*sizeof(char));
+ if (!ptr->metadata) return GF_OUT_OF_MEM;
+ gf_bs_read_data(bs, ptr->metadata, len);
+ ptr->metadata[len] = 0;
+ }
+ return GF_OK;
+}
+
+#ifndef GPAC_DISABLE_ISOM_WRITE
+
+/*serializes the 'flxs' box: plain box header (not a full box) plus the
+NUL-terminated metadata string, if any*/
+GF_Err flxs_Write(GF_Box *s, GF_BitStream *bs)
+{
+ GF_Err e;
+ GF_AdobeFlashAccessParamsBox *ptr = (GF_AdobeFlashAccessParamsBox *) s;
+ if (!s) return GF_BAD_PARAM;
+ e = gf_isom_box_write_header(s, bs);
+ if (e) return e;
+ if (ptr->metadata) {
+ gf_bs_write_data(bs, ptr->metadata, (u32) strlen(ptr->metadata));
+ gf_bs_write_u8(bs, 0); //string end
+ }
+ return GF_OK;
+}
+
+/*computes the 'flxs' box size: plain box header plus metadata string + terminator*/
+GF_Err flxs_Size(GF_Box *s)
+{
+ GF_Err e;
+ GF_AdobeFlashAccessParamsBox *ptr = (GF_AdobeFlashAccessParamsBox*)s;
+ e = gf_isom_box_get_size(s);
+ if (e) return e;
+ if (ptr->metadata)
+ ptr->size += strlen(ptr->metadata) + 1;
+ return GF_OK;
+}
+#endif //GPAC_DISABLE_ISOM_WRITE
+
+/*creates the Adobe DRM AU Format ('adaf') box*/
+GF_Box *adaf_New()
+{
+ ISOM_DECL_BOX_ALLOC(GF_AdobeDRMAUFormatBox, GF_ISOM_BOX_TYPE_ADAF);
+ return (GF_Box *)tmp;
+}
+
+/*destroys an 'adaf' box - no owned pointers, just the box itself*/
+void adaf_del(GF_Box *s)
+{
+ gf_free(s);
+}
+
+/*reads the 'adaf' payload: selective-encryption byte, one reserved byte, IV length*/
+GF_Err adaf_Read(GF_Box *s, GF_BitStream *bs)
+{
+ GF_Err e;
+ GF_AdobeDRMAUFormatBox *ptr = (GF_AdobeDRMAUFormatBox*)s;
+
+ e = gf_isom_full_box_read(s, bs);
+ if (e) return e;
+
+ ptr->selective_enc = gf_bs_read_u8(bs);
+ gf_bs_read_u8(bs);//reserved
+ ptr->IV_length = gf_bs_read_u8(bs);
+ ptr->size -= 3;
+ return GF_OK;
+}
+
+#ifndef GPAC_DISABLE_ISOM_WRITE
+
+/*serializes the 'adaf' box: selective-encryption byte, reserved zero byte, IV length*/
+GF_Err adaf_Write(GF_Box *s, GF_BitStream *bs)
+{
+ GF_Err e;
+ GF_AdobeDRMAUFormatBox *ptr = (GF_AdobeDRMAUFormatBox *) s;
+ if (!s) return GF_BAD_PARAM;
+ e = gf_isom_full_box_write(s, bs);
+ if (e) return e;
+
+ gf_bs_write_u8(bs, ptr->selective_enc);
+ gf_bs_write_u8(bs, 0x0);
+ gf_bs_write_u8(bs, ptr->IV_length);
+ return GF_OK;
+}
+
+/*computes the 'adaf' box size: full-box header plus the 3 fixed payload bytes*/
+GF_Err adaf_Size(GF_Box *s)
+{
+ GF_Err e;
+ GF_AdobeDRMAUFormatBox *ptr = (GF_AdobeDRMAUFormatBox*)s;
+ e = gf_isom_full_box_get_size(s);
+ if (e) return e;
+ ptr->size += 3;
+ return GF_OK;
+}
+#endif //GPAC_DISABLE_ISOM_WRITE
+
+
#endif /*GPAC_DISABLE_ISOM*/
case GF_ISOM_BOX_TYPE_SBTT:
return metx_dump(a, trace);
#endif
+
+ /*Adobe's protection boxes*/
+ case GF_ISOM_BOX_TYPE_ADKM:
+ return adkm_dump(a, trace);
+ case GF_ISOM_BOX_TYPE_AHDR:
+ return ahdr_dump(a, trace);
+ case GF_ISOM_BOX_TYPE_ADAF:
+ return adaf_dump(a, trace);
+ case GF_ISOM_BOX_TYPE_APRM:
+ return aprm_dump(a, trace);
+ case GF_ISOM_BOX_TYPE_AEIB:
+ return aeib_dump(a, trace);
+ case GF_ISOM_BOX_TYPE_AKEY:
+ return akey_dump(a, trace);
+ case GF_ISOM_BOX_TYPE_FLXS:
+ return flxs_dump(a, trace);
default:
return defa_dump(a, trace);
if (p->isfm) gf_box_dump(p->isfm, trace);
if (p->okms) gf_box_dump(p->okms, trace);
if (p->tenc) gf_box_dump(p->tenc, trace);
+ if (p->adkm) gf_box_dump(p->adkm, trace);
gf_box_dump_done("SchemeInformationBox", a, trace);
return GF_OK;
}
return GF_OK;
}
+/*XML trace dump of the 'adkm' box and its children*/
+GF_Err adkm_dump(GF_Box *a, FILE * trace)
+{
+ GF_AdobeDRMKeyManagementSystemBox *ptr = (GF_AdobeDRMKeyManagementSystemBox *)a;
+ if (!a) return GF_BAD_PARAM;
+ fprintf(trace, "<GF_AdobeDRMKeyManagementSystemBox>\n");
+ DumpBox(a, trace);
+ gf_full_box_dump((GF_Box *)a, trace);
+ if (ptr->header) gf_box_dump((GF_Box *)ptr->header, trace);
+ if (ptr->au_format) gf_box_dump((GF_Box *)ptr->au_format, trace);
+ gf_box_dump_done("GF_AdobeDRMKeyManagementSystemBox", a, trace);
+ return GF_OK;
+}
+
+/*XML trace dump of the 'ahdr' box and its 'aprm' child*/
+GF_Err ahdr_dump(GF_Box *a, FILE * trace)
+{
+ GF_AdobeDRMHeaderBox *ptr = (GF_AdobeDRMHeaderBox *)a;
+ if (!a) return GF_BAD_PARAM;
+ fprintf(trace, "<GF_AdobeDRMHeaderBox>\n");
+ DumpBox(a, trace);
+ gf_full_box_dump((GF_Box *)a, trace);
+ if (ptr->std_enc_params) gf_box_dump((GF_Box *)ptr->std_enc_params, trace);
+ gf_box_dump_done("GF_AdobeDRMHeaderBox", a, trace);
+ return GF_OK;
+}
+
+/*XML trace dump of the 'aprm' box and its 'aeib'/'akey' children*/
+GF_Err aprm_dump(GF_Box *a, FILE * trace)
+{
+ GF_AdobeStdEncryptionParamsBox *ptr = (GF_AdobeStdEncryptionParamsBox *)a;
+ if (!a) return GF_BAD_PARAM;
+ fprintf(trace, "<GF_AdobeStdEncryptionParamsBox>\n");
+ DumpBox(a, trace);
+ gf_full_box_dump((GF_Box *)a, trace);
+ if (ptr->enc_info) gf_box_dump((GF_Box *)ptr->enc_info, trace);
+ if (ptr->key_info) gf_box_dump((GF_Box *)ptr->key_info, trace);
+ gf_box_dump_done("GF_AdobeStdEncryptionParamsBox", a, trace);
+ return GF_OK;
+}
+
+/*XML trace dump of the 'aeib' box.
+Fix: enc_algo stays NULL when the box carried no algorithm string (see aeib_Read),
+and passing NULL to fprintf %s is undefined behavior - substitute an empty string.*/
+GF_Err aeib_dump(GF_Box *a, FILE * trace)
+{
+ GF_AdobeEncryptionInfoBox *ptr = (GF_AdobeEncryptionInfoBox *)a;
+ if (!a) return GF_BAD_PARAM;
+ fprintf(trace, "<GF_AdobeEncryptionInfoBox EncryptionAlgorithm=\"%s\" KeyLength=\"%d\">\n", ptr->enc_algo ? ptr->enc_algo : "", ptr->key_length);
+ DumpBox(a, trace);
+ gf_full_box_dump((GF_Box *)a, trace);
+ gf_box_dump_done("GF_AdobeEncryptionInfoBox", a, trace);
+ return GF_OK;
+}
+
+/*XML trace dump of the 'akey' box and its 'flxs' child*/
+GF_Err akey_dump(GF_Box *a, FILE * trace)
+{
+ GF_AdobeKeyInfoBox *ptr = (GF_AdobeKeyInfoBox *)a;
+ if (!a) return GF_BAD_PARAM;
+ fprintf(trace, "<GF_AdobeKeyInfoBox>\n");
+ DumpBox(a, trace);
+ gf_full_box_dump((GF_Box *)a, trace);
+ if (ptr->params) gf_box_dump((GF_Box *)ptr->params, trace);
+ gf_box_dump_done("GF_AdobeKeyInfoBox", a, trace);
+ return GF_OK;
+}
+
+/*XML trace dump of the 'flxs' box; metadata is printed only when present
+(note 'flxs' is a plain box, hence no gf_full_box_dump here)*/
+GF_Err flxs_dump(GF_Box *a, FILE * trace)
+{
+ GF_AdobeFlashAccessParamsBox *ptr = (GF_AdobeFlashAccessParamsBox *)a;
+ if (!a) return GF_BAD_PARAM;
+ fprintf(trace, "<GF_AdobeFlashAccessParamsBox>\n");
+ DumpBox(a, trace);
+ if (ptr->metadata)
+ fprintf(trace, "<FmrmsV2Metadata=\"%s\"/>\n", ptr->metadata);
+ gf_box_dump_done("GF_AdobeFlashAccessParamsBox", a, trace);
+ return GF_OK;
+}
+
+/*XML trace dump of the 'adaf' box (selective-encryption flag and IV length)*/
+GF_Err adaf_dump(GF_Box *a, FILE * trace)
+{
+ GF_AdobeDRMAUFormatBox *ptr = (GF_AdobeDRMAUFormatBox *)a;
+ if (!a) return GF_BAD_PARAM;
+ fprintf(trace, "<GF_AdobeDRMAUFormatBox SelectiveEncryption=\"%d\" IV_length=\"%d\">\n", ptr->selective_enc ? 1 : 0, ptr->IV_length);
+ DumpBox(a, trace);
+ gf_full_box_dump((GF_Box *)a, trace);
+ gf_box_dump_done("GF_AdobeDRMAUFormatBox", a, trace);
+ return GF_OK;
+}
+
#endif /*GPAC_DISABLE_ISOM_DUMP*/
{
GF_Err e;
GF_Box *a = NULL;
+
while (parent->size) {
e = gf_isom_parse_box_ex(&a, bs, parent_type);
if (e) {
case GF_ISOM_BOX_TYPE_SBTT: return metx_New(GF_ISOM_BOX_TYPE_SBTT);
#endif //GPAC_DISABLE_TTXT
+ case GF_ISOM_BOX_TYPE_ADKM: return adkm_New();
+ case GF_ISOM_BOX_TYPE_AHDR: return ahdr_New();
+ case GF_ISOM_BOX_TYPE_APRM: return aprm_New();
+ case GF_ISOM_BOX_TYPE_AEIB: return aeib_New();
+ case GF_ISOM_BOX_TYPE_AKEY: return akey_New();
+ case GF_ISOM_BOX_TYPE_FLXS: return flxs_New();
+ case GF_ISOM_BOX_TYPE_ADAF: return adaf_New();
+
default:
a = defa_New();
if (a) a->type = boxType;
#endif // GPAC_DISABLE_TTXT
+ case GF_ISOM_BOX_TYPE_ADKM: adkm_del(a); return;
+ case GF_ISOM_BOX_TYPE_AHDR: ahdr_del(a); return;
+ case GF_ISOM_BOX_TYPE_APRM: aprm_del(a); return;
+ case GF_ISOM_BOX_TYPE_AEIB: aeib_del(a); return;
+ case GF_ISOM_BOX_TYPE_AKEY: akey_del(a); return;
+ case GF_ISOM_BOX_TYPE_FLXS: flxs_del(a); return;
+ case GF_ISOM_BOX_TYPE_ADAF: adaf_del(a); return;
+
default:
defa_del(a);
return;
#endif // GPAC_DISABLE_TTXT
+ case GF_ISOM_BOX_TYPE_ADKM: return adkm_Read(a, bs);
+ case GF_ISOM_BOX_TYPE_AHDR: return ahdr_Read(a, bs);
+ case GF_ISOM_BOX_TYPE_APRM: return aprm_Read(a, bs);
+ case GF_ISOM_BOX_TYPE_AEIB: return aeib_Read(a, bs);
+ case GF_ISOM_BOX_TYPE_AKEY: return akey_Read(a, bs);
+ case GF_ISOM_BOX_TYPE_FLXS: return flxs_Read(a, bs);
+ case GF_ISOM_BOX_TYPE_ADAF: return adaf_Read(a, bs);
+
default:
return defa_Read(a, bs);
}
case GF_ISOM_BOX_TYPE_SBTT: return metx_Write(a, bs);
#endif//GPAC_DISABLE_TTXT
+ case GF_ISOM_BOX_TYPE_ADKM: return adkm_Write(a, bs);
+ case GF_ISOM_BOX_TYPE_AHDR: return ahdr_Write(a, bs);
+ case GF_ISOM_BOX_TYPE_APRM: return aprm_Write(a, bs);
+ case GF_ISOM_BOX_TYPE_AEIB: return aeib_Write(a, bs);
+ case GF_ISOM_BOX_TYPE_AKEY: return akey_Write(a, bs);
+ case GF_ISOM_BOX_TYPE_FLXS: return flxs_Write(a, bs);
+ case GF_ISOM_BOX_TYPE_ADAF: return adaf_Write(a, bs);
+
default:
return defa_Write(a, bs);
}
case GF_ISOM_BOX_TYPE_SBTT: return metx_Size(a);
#endif // GPAC_DISABLE_TTXT
+ case GF_ISOM_BOX_TYPE_ADKM: return adkm_Size(a);
+ case GF_ISOM_BOX_TYPE_AHDR: return ahdr_Size(a);
+ case GF_ISOM_BOX_TYPE_APRM: return aprm_Size(a);
+ case GF_ISOM_BOX_TYPE_AEIB: return aeib_Size(a);
+ case GF_ISOM_BOX_TYPE_AKEY: return akey_Size(a);
+ case GF_ISOM_BOX_TYPE_FLXS: return flxs_Size(a);
+ case GF_ISOM_BOX_TYPE_ADAF: return adaf_Size(a);
+
default: return defa_Size(a);
}
}
if (!sinf) sinf = gf_isom_get_sinf_entry(trak, sampleDescriptionIndex, GF_ISOM_CBC_SCHEME, &sea);
if (!sinf) sinf = gf_isom_get_sinf_entry(trak, sampleDescriptionIndex, GF_ISOM_ISMACRYP_SCHEME, &sea);
if (!sinf) sinf = gf_isom_get_sinf_entry(trak, sampleDescriptionIndex, GF_ISOM_OMADRM_SCHEME, &sea);
+ if (!sinf) sinf = gf_isom_get_sinf_entry(trak, sampleDescriptionIndex, GF_ISOM_ADOBE_SCHEME, &sea);
if (!sinf) return GF_OK;
sea->type = sinf->original_format->data_format;
}
-static GF_Err gf_isom_set_protected_entry(GF_ISOFile *the_file, u32 trackNumber, u32 desc_index, u32 scheme_type, u32 scheme_version, Bool is_isma, GF_ProtectionInfoBox **out_sinf)
+static GF_Err gf_isom_set_protected_entry(GF_ISOFile *the_file, u32 trackNumber, u32 desc_index, u8 version, u32 flags,
+ u32 scheme_type, u32 scheme_version, char *scheme_uri, Bool is_isma, GF_ProtectionInfoBox **out_sinf)
{
u32 original_format;
GF_Err e;
gf_list_add(sea->protections, sinf);
sinf->scheme_type = (GF_SchemeTypeBox *)schm_New();
+ sinf->scheme_type->version = version;
+ sinf->scheme_type->flags = flags;
sinf->scheme_type->scheme_type = scheme_type;
sinf->scheme_type->scheme_version = scheme_version;
+ if (sinf->scheme_type->flags == 1) {
+ sinf->scheme_type->URI = (char *)gf_malloc(sizeof(char)*strlen(scheme_uri));
+ memmove(sinf->scheme_type->URI, scheme_uri, strlen(scheme_uri));
+ }
sinf->original_format = (GF_OriginalFormatBox *)frma_New();
sinf->original_format->data_format = original_format;
GF_ProtectionInfoBox *sinf;
//setup generic protection
- e = gf_isom_set_protected_entry(the_file, trackNumber, desc_index, scheme_type, scheme_version, GF_TRUE, &sinf);
+ e = gf_isom_set_protected_entry(the_file, trackNumber, desc_index, 0, 0, scheme_type, scheme_version, NULL, GF_TRUE, &sinf);
if (e) return e;
if (scheme_uri) {
GF_Err e;
//setup generic protection
- e = gf_isom_set_protected_entry(the_file, trackNumber, desc_index, GF_ISOM_OMADRM_SCHEME, 0x00000200, GF_FALSE, &sinf);
+ e = gf_isom_set_protected_entry(the_file, trackNumber, desc_index, 0, 0, GF_ISOM_OMADRM_SCHEME, 0x00000200, NULL, GF_FALSE, &sinf);
if (e) return e;
sinf->info->okms = (GF_OMADRMKMSBox *)odkm_New();
GF_ProtectionInfoBox *sinf;
//setup generic protection
- e = gf_isom_set_protected_entry(the_file, trackNumber, desc_index, scheme_type, scheme_version, GF_FALSE, &sinf);
+ e = gf_isom_set_protected_entry(the_file, trackNumber, desc_index, 0, 0, scheme_type, scheme_version, NULL, GF_FALSE, &sinf);
if (e) return e;
sinf->info->tenc = (GF_TrackEncryptionBox *)tenc_New();
offset += (nb_saio == 1) ? prev_sai_size : 0;
cur_position = gf_bs_get_position(mdia->information->dataHandler->bs);
gf_bs_seek(mdia->information->dataHandler->bs, offset);
- buffer = (char *)malloc(size);
+ buffer = (char *)gf_malloc(size);
gf_bs_read_data(mdia->information->dataHandler->bs, buffer, size);
gf_bs_seek(mdia->information->dataHandler->bs, cur_position);
memset(*sai, 0, sizeof(GF_CENCSampleAuxInfo));
bs = gf_bs_new(buffer, size, GF_BITSTREAM_READ);
gf_bs_read_data(bs, (char *)(*sai)->IV, IV_size);
- if (size > 16) {
+ if (size > IV_size) {
(*sai)->subsample_count = gf_bs_read_u16(bs);
(*sai)->subsamples = (GF_CENCSubSampleEntry *)gf_malloc(sizeof(GF_CENCSubSampleEntry)*(*sai)->subsample_count);
for (i = 0; i < (*sai)->subsample_count; i++) {
}
gf_isom_get_sample_cenc_info_ex(trak, NULL, sampleNumber, NULL, &IV_size, NULL);
- if (!IV_size)
+ if (!IV_size) {
+ GF_SAFEALLOC( (*sai), GF_CENCSampleAuxInfo);
return GF_OK;
+ }
/*get sample auxiliary information by saiz/saio rather than by parsing senc box*/
if (gf_isom_cenc_has_saiz_saio(stbl, NULL)) {
gf_isom_cenc_get_default_info_ex(trak, sampleDescriptionIndex, default_IsEncrypted, default_IV_size, default_KID);
}
+/*
+ Adobe'protection scheme
+*/
+/*installs an Adobe Access protection scheme on the given sample description:
+builds the full adkm/ahdr/aprm/aeib/akey/flxs/adaf hierarchy with AES-CBC,
+16-byte keys/IVs, and the supplied FMRMS metadata.
+NOTE(review): every gf_malloc/box _New() result below is used unchecked -
+an OOM would crash rather than return GF_OUT_OF_MEM; confirm this matches
+the error-handling policy of the surrounding file.*/
+GF_Err gf_isom_set_adobe_protection(GF_ISOFile *the_file, u32 trackNumber, u32 desc_index, u32 scheme_type, u32 scheme_version, Bool is_selective_enc, char *metadata, u32 len)
+{
+ GF_Err e;
+ GF_ProtectionInfoBox *sinf;
+
+ //setup generic protection (schm version 1, flags 0, no scheme URI)
+ e = gf_isom_set_protected_entry(the_file, trackNumber, desc_index, 1, 0, scheme_type, scheme_version, NULL, GF_FALSE, &sinf);
+ if (e) return e;
+
+ sinf->info->adkm = (GF_AdobeDRMKeyManagementSystemBox *)adkm_New();
+
+ sinf->info->adkm->header = (GF_AdobeDRMHeaderBox *)ahdr_New();
+
+ sinf->info->adkm->header->std_enc_params = (GF_AdobeStdEncryptionParamsBox *)aprm_New();
+
+ sinf->info->adkm->header->std_enc_params->enc_info = (GF_AdobeEncryptionInfoBox *)aeib_New();
+ if (sinf->info->adkm->header->std_enc_params->enc_info->enc_algo)
+ gf_free(sinf->info->adkm->header->std_enc_params->enc_info->enc_algo);
+ sinf->info->adkm->header->std_enc_params->enc_info->enc_algo = (char *)gf_malloc(8*sizeof(char));
+ strncpy(sinf->info->adkm->header->std_enc_params->enc_info->enc_algo, "AES-CBC", 7);
+ sinf->info->adkm->header->std_enc_params->enc_info->enc_algo[7] = 0;
+ sinf->info->adkm->header->std_enc_params->enc_info->key_length = 16;
+
+ sinf->info->adkm->header->std_enc_params->key_info = (GF_AdobeKeyInfoBox *)akey_New();
+
+ sinf->info->adkm->header->std_enc_params->key_info->params = (GF_AdobeFlashAccessParamsBox *)flxs_New();
+ if (metadata && len) {
+ if (sinf->info->adkm->header->std_enc_params->key_info->params->metadata)
+ gf_free(sinf->info->adkm->header->std_enc_params->key_info->params->metadata);
+ //copy and NUL-terminate the caller's metadata blob
+ sinf->info->adkm->header->std_enc_params->key_info->params->metadata = (char *)gf_malloc((len+1)*sizeof(char));
+ strncpy(sinf->info->adkm->header->std_enc_params->key_info->params->metadata, metadata, len);
+ sinf->info->adkm->header->std_enc_params->key_info->params->metadata[len] = 0;
+ }
+
+ sinf->info->adkm->au_format = (GF_AdobeDRMAUFormatBox *)adaf_New();
+ sinf->info->adkm->au_format->selective_enc = is_selective_enc ? 0x10 : 0x00;
+ sinf->info->adkm->au_format->IV_length = 16;
+
+ return GF_OK;
+}
+
+GF_EXPORT
+/*returns GF_TRUE if the given sample description carries an Adobe Access
+protection scheme with an 'adkm' box, GF_FALSE otherwise (including bad track)*/
+Bool gf_isom_is_adobe_protection_media(GF_ISOFile *the_file, u32 trackNumber, u32 sampleDescriptionIndex)
+{
+ GF_TrackBox *trak;
+ GF_ProtectionInfoBox *sinf;
+
+ trak = gf_isom_get_track_from_file(the_file, trackNumber);
+ if (!trak) return GF_FALSE;
+
+ sinf = gf_isom_get_sinf_entry(trak, sampleDescriptionIndex, GF_ISOM_ADOBE_SCHEME, NULL);
+
+ if (!sinf) return GF_FALSE;
+
+ /*non-encrypted or non-ADOBE*/
+ if (!sinf->info || !sinf->info->adkm)
+ return GF_FALSE;
+
+ return GF_TRUE;
+}
+
+GF_EXPORT
+/*retrieves original format, scheme type and scheme version from the Adobe
+protection sinf of the given sample description; any out pointer may be NULL*/
+GF_Err gf_isom_get_adobe_protection_info(GF_ISOFile *the_file, u32 trackNumber, u32 sampleDescriptionIndex, u32 *outOriginalFormat, u32 *outSchemeType, u32 *outSchemeVersion)
+{
+ GF_TrackBox *trak;
+ GF_ProtectionInfoBox *sinf;
+
+ trak = gf_isom_get_track_from_file(the_file, trackNumber);
+ if (!trak) return GF_BAD_PARAM;
+
+ sinf = gf_isom_get_sinf_entry(trak, sampleDescriptionIndex, GF_ISOM_ADOBE_SCHEME, NULL);
+
+ if (!sinf) return GF_BAD_PARAM;
+
+ if (outOriginalFormat) {
+ *outOriginalFormat = sinf->original_format->data_format;
+ //MP4-systems descriptions are reported under the generic MPEG-4 subtype
+ if (IsMP4Description(sinf->original_format->data_format)) *outOriginalFormat = GF_ISOM_SUBTYPE_MPEG4;
+ }
+ if (outSchemeType) *outSchemeType = sinf->scheme_type->scheme_type;
+ if (outSchemeVersion) *outSchemeVersion = sinf->scheme_type->scheme_version;
+
+ return GF_OK;
+}
+
#endif // GPAC_DISABLE_ISOM_FRAGMENTS
GF_EXPORT
u32 gf_isom_get_sample_duration(GF_ISOFile *the_file, u32 trackNumber, u32 sampleNumber)
{
- u64 dur;
+ u32 dur;
u64 dts;
GF_TrackBox *trak = gf_isom_get_track_from_file(the_file, trackNumber);
if (!trak || !sampleNumber) return 0;
+#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
+ if (sampleNumber<=trak->sample_count_at_seg_start) return 0;
+ sampleNumber -= trak->sample_count_at_seg_start;
+#endif
- stbl_GetSampleDTS(trak->Media->information->sampleTable->TimeToSample, sampleNumber, &dur);
- if (sampleNumber == trak->Media->information->sampleTable->SampleSize->sampleCount) {
- return (u32) (trak->Media->mediaHeader->duration - dur);
- }
-
- stbl_GetSampleDTS(trak->Media->information->sampleTable->TimeToSample, sampleNumber+1, &dts);
- return (u32) (dts - dur);
+ stbl_GetSampleDTS_and_Duration(trak->Media->information->sampleTable->TimeToSample, sampleNumber, &dts, &dur);
+ return dur;
}
u32 size = 0;
GF_TrackBox *trak = gf_isom_get_track_from_file(the_file, trackNumber);
if (!trak || !sampleNumber) return 0;
-
+#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
+ if (sampleNumber<=trak->sample_count_at_seg_start) return 0;
+ sampleNumber -= trak->sample_count_at_seg_start;
+#endif
stbl_GetSampleSize(trak->Media->information->sampleTable->SampleSize, sampleNumber, &size);
return size;
}
if (!trak || !sampleNumber) return 0;
if (! trak->Media->information->sampleTable->SyncSample) return 1;
+#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
+ if (sampleNumber<=trak->sample_count_at_seg_start) return 0;
+ sampleNumber -= trak->sample_count_at_seg_start;
+#endif
e = stbl_GetSampleRAP(trak->Media->information->sampleTable->SyncSample, sampleNumber, &is_rap, NULL, NULL);
if (e) return 0;
return is_rap;
if (!trak) return NULL;
if (!sampleNumber) return NULL;
+#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
+ if (sampleNumber<=trak->sample_count_at_seg_start) return NULL;
+ sampleNumber -= trak->sample_count_at_seg_start;
+#endif
samp = gf_isom_sample_new();
if (!samp) return NULL;
e = Media_GetSample(trak->Media, sampleNumber, &samp, sampleDescriptionIndex, 1, data_offset);
if (!trak) return 0;
if (!sampleNumber) return 0;
+#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
+ if (sampleNumber<=trak->sample_count_at_seg_start) return 0;
+ sampleNumber -= trak->sample_count_at_seg_start;
+#endif
if (stbl_GetSampleDTS(trak->Media->information->sampleTable->TimeToSample, sampleNumber, &dts) != GF_OK) return 0;
return dts;
}
if (reset_tables) {
u32 type, dur;
u64 dts;
+ Bool scalable = has_scalable;
GF_SampleTableBox *stbl = trak->Media->information->sampleTable;
- if (has_scalable && !gf_isom_get_reference_count(movie, i+1, GF_ISOM_REF_SCAL))
+
+ if (scalable) {
+ //check if the base reference is in the file - if not, do not consider the track is scalable.
+ if (gf_isom_get_reference_count(movie, i+1, GF_ISOM_REF_BASE) > 0) {
+ u32 on_track=0;
+ GF_TrackBox *base;
+ gf_isom_get_reference(movie, i+1, GF_ISOM_REF_BASE, 1, &on_track);
+
+ base = gf_isom_get_track_from_file(movie, on_track);
+ if (!base) scalable = GF_FALSE;
+ }
+ }
+
+ if (scalable && !gf_isom_get_reference_count(movie, i+1, GF_ISOM_REF_SCAL))
base_track_sample_count = stbl->SampleSize->sampleCount;
- trak->sample_count_at_seg_start += has_scalable ? base_track_sample_count : stbl->SampleSize->sampleCount;
+ trak->sample_count_at_seg_start += scalable ? base_track_sample_count : stbl->SampleSize->sampleCount;
if (trak->sample_count_at_seg_start) {
GF_Err e;
e = stbl_GetSampleDTS_and_Duration(stbl->TimeToSample, stbl->SampleSize->sampleCount, &dts, &dur);
return GF_FALSE;
count = gf_isom_get_track_count(file);
for (i = 0; i < count; i++) {
- if (gf_isom_get_reference_count(file, i+1, GF_ISOM_REF_SCAL) > 0)
+ if (gf_isom_get_reference_count(file, i+1, GF_ISOM_REF_SCAL) > 0) {
return GF_TRUE;
+ }
}
return GF_FALSE;
}
return GF_OK;
}
-
-GF_Err gf_isom_add_sample_group_entry(GF_List *sampleGroups, u32 sample_number, u32 grouping_type, u32 sampleGroupDescriptionIndex)
+/*for now not exported*/
+/*expands sampleGroup table for the given grouping type and sample_number. If sample_number is 0, just appends an entry at the end of the table*/
+static GF_Err gf_isom_add_sample_group_entry(GF_List *sampleGroups, u32 sample_number, u32 grouping_type, u32 sampleGroupDescriptionIndex)
{
GF_SampleGroupBox *sgroup = NULL;
u32 i, count, last_sample_in_entry;
return GF_OK;
}
-/*for now not exported*/
-static GF_Err gf_isom_set_sample_group_info(GF_ISOFile *movie, u32 track, u32 sample_number, u32 grouping_type, void *udta, void *(*sg_create_entry)(void *udta), Bool (*sg_compare_entry)(void *udta, void *entry))
+
+static GF_Err gf_isom_set_sample_group_info_ex(GF_SampleTableBox *stbl, GF_TrackFragmentBox *traf, u32 sample_number, u32 grouping_type, void *udta, void *(*sg_create_entry)(void *udta), Bool (*sg_compare_entry)(void *udta, void *entry))
{
- GF_Err e;
- GF_TrackBox *trak;
GF_List *groupList;
void *entry;
GF_SampleGroupDescriptionBox *sgdesc = NULL;
u32 i, count, entry_idx;
- e = CanAccessMovie(movie, GF_ISOM_OPEN_WRITE);
- if (e) return e;
-
- trak = gf_isom_get_track_from_file(movie, track);
- if (!trak) return GF_BAD_PARAM;
+ if (!stbl && !traf) return GF_BAD_PARAM;
- /*look in stbl for sample sampleGroupsDescription*/
- if (!trak->Media->information->sampleTable->sampleGroupsDescription)
- trak->Media->information->sampleTable->sampleGroupsDescription = gf_list_new();
+ /*look in stbl or traf for sample sampleGroupsDescription*/
+ if (traf) {
+ if (!traf->sampleGroupsDescription)
+ traf->sampleGroupsDescription = gf_list_new();
+ groupList = traf->sampleGroupsDescription;
+ } else {
+ if (!stbl->sampleGroupsDescription)
+ stbl->sampleGroupsDescription = gf_list_new();
+ groupList = stbl->sampleGroupsDescription;
+ }
- groupList = trak->Media->information->sampleTable->sampleGroupsDescription;
count = gf_list_count(groupList);
for (i=0; i<count; i++) {
sgdesc = gf_list_get(groupList, i);
entry_idx = 1 + gf_list_find(sgdesc->group_descriptions, entry);
- /*look in stbl for sample sampleGroups*/
- if (!trak->Media->information->sampleTable->sampleGroups)
- trak->Media->information->sampleTable->sampleGroups = gf_list_new();
+ /*look in stbl or traf for sample sampleGroups*/
+ if (traf) {
+ if (!traf->sampleGroups)
+ traf->sampleGroups = gf_list_new();
+ groupList = traf->sampleGroups;
+ entry_idx |= 0x10000;
+ } else {
+ if (!stbl->sampleGroups)
+ stbl->sampleGroups = gf_list_new();
+ groupList = stbl->sampleGroups;
+ }
+
+ return gf_isom_add_sample_group_entry(groupList, sample_number, grouping_type, entry_idx);
+}
+
+/*for now not exported*/
+static GF_Err gf_isom_set_sample_group_info(GF_ISOFile *movie, u32 track, u32 sample_number, u32 grouping_type, void *udta, void *(*sg_create_entry)(void *udta), Bool (*sg_compare_entry)(void *udta, void *entry))
+{
+ GF_Err e;
+ GF_TrackBox *trak;
+
+ e = CanAccessMovie(movie, GF_ISOM_OPEN_WRITE);
+ if (e) return e;
- groupList = trak->Media->information->sampleTable->sampleGroups;
+ trak = gf_isom_get_track_from_file(movie, track);
+ if (!trak) return GF_BAD_PARAM;
- return gf_isom_add_sample_group_entry(trak->Media->information->sampleTable->sampleGroups, sample_number, grouping_type, entry_idx);
+ return gf_isom_set_sample_group_info_ex(trak->Media->information->sampleTable, NULL, sample_number, grouping_type, udta, sg_create_entry, sg_compare_entry);
}
void *sg_rap_create_entry(void *udta)
return 0;
}
+/*Copies the sample-group assignment identified by (grouping_type, sampleGroupDescriptionIndex)
+from the movie sample table into a track fragment.
+If sgpd_in_traf is set, the description entry itself is serialized and re-created inside the traf
+through gf_isom_set_sample_group_info_ex; otherwise only the sbgp mapping entry is added and the
+description stays in the movie-level sgpd. Returns GF_BAD_PARAM when the grouping type or the
+1-based description index cannot be resolved.*/
+GF_Err gf_isom_copy_sample_group_entry_to_traf(GF_TrackFragmentBox *traf, GF_SampleTableBox *stbl, u32 grouping_type, u32 sampleGroupDescriptionIndex, Bool sgpd_in_traf)
+{
+	if (sgpd_in_traf) {
+		void *entry = NULL;
+		u32 i, count;
+		GF_SampleGroupDescriptionBox *sgdesc = NULL;
+		GF_BitStream *bs;
+
+		/*locate the sgpd box of the requested grouping type in the movie sample table*/
+		count = gf_list_count(stbl->sampleGroupsDescription);
+		for (i = 0; i < count; i++) {
+			sgdesc = (GF_SampleGroupDescriptionBox *)gf_list_get(stbl->sampleGroupsDescription, i);
+			if (sgdesc->grouping_type == grouping_type)
+				break;
+			sgdesc = NULL;
+		}
+		if (!sgdesc)
+			return GF_BAD_PARAM;
+
+		/*description indices are 1-based in ISOBMFF*/
+		entry = gf_list_get(sgdesc->group_descriptions, sampleGroupDescriptionIndex-1);
+		if (!entry)
+			return GF_BAD_PARAM;
+
+		switch (grouping_type) {
+		case GF_4CC( 'r', 'a', 'p', ' ' ):
+		{
+			char udta[2];
+			bs = gf_bs_new(udta, 2*sizeof(char), GF_BITSTREAM_WRITE);
+			gf_bs_write_u8(bs, ((GF_VisualRandomAccessEntry *)entry)->num_leading_samples_known);
+			gf_bs_write_u8(bs, ((GF_VisualRandomAccessEntry *)entry)->num_leading_samples);
+			/*bs only wraps the stack buffer: release it before passing udta on (was leaked)*/
+			gf_bs_del(bs);
+			return gf_isom_set_sample_group_info_ex(NULL, traf, 0, grouping_type, udta, sg_rap_create_entry, sg_rap_compare_entry);
+		}
+		case GF_4CC( 'r', 'o', 'l', 'l' ):
+		{
+			char udta[2];
+			bs = gf_bs_new(udta, 2*sizeof(char), GF_BITSTREAM_WRITE);
+			gf_bs_write_u16(bs, ((GF_RollRecoveryEntry *)entry)->roll_distance);
+			/*release the wrapper bitstream, as done in the 'seig' case below (was leaked)*/
+			gf_bs_del(bs);
+			return gf_isom_set_sample_group_info_ex(NULL, traf, 0, grouping_type, udta, sg_roll_create_entry, sg_roll_compare_entry);
+		}
+		case GF_4CC( 's', 'e', 'i', 'g' ):
+		{
+			char udta[20];
+			bs = gf_bs_new(udta, 20*sizeof(char), GF_BITSTREAM_WRITE);
+			gf_bs_write_u24(bs, ((GF_CENCSampleEncryptionGroupEntry *)entry)->IsEncrypted);
+			gf_bs_write_u8(bs, ((GF_CENCSampleEncryptionGroupEntry *)entry)->IV_size);
+			gf_bs_write_data(bs, (char *) ((GF_CENCSampleEncryptionGroupEntry *)entry)->KID, 16);
+			gf_bs_del(bs);
+			return gf_isom_set_sample_group_info_ex(NULL, traf, 0, grouping_type, udta, sg_encryption_create_entry, sg_encryption_compare_entry);
+		}
+		default:
+			return GF_BAD_PARAM;
+		}
+	}
+
+	/*description kept at movie level: only record the sbgp mapping in the traf*/
+	return gf_isom_add_sample_group_entry(traf->sampleGroups, 0, grouping_type, sampleGroupDescriptionIndex);
+}
+
/*sample encryption information group can be in stbl or traf*/
GF_EXPORT
GF_Err gf_isom_set_sample_cenc_group(GF_ISOFile *movie, u32 track, u32 sample_number, Bool isEncrypted, u8 IV_size, bin128 KeyID)
case GF_ISOM_BOX_TYPE_AVC2:
case GF_ISOM_BOX_TYPE_AVC3:
case GF_ISOM_BOX_TYPE_AVC4:
- case GF_ISOM_BOX_TYPE_SVC1:
case GF_ISOM_BOX_TYPE_HVC1:
case GF_ISOM_BOX_TYPE_HEV1:
case GF_ISOM_BOX_TYPE_HVC2:
case GF_ISOM_BOX_TYPE_HEV2:
+ esd = ((GF_MPEGVisualSampleEntryBox*) entry)->emul_esd;
+ break;
+ case GF_ISOM_BOX_TYPE_SVC1:
+ if ((mdia->mediaTrack->extractor_mode & 0x0000FFFF) != GF_ISOM_NALU_EXTRACT_INSPECT)
+ AVC_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*) entry, mdia);
+ else
+ AVC_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*) entry, NULL);
+ esd = ((GF_MPEGVisualSampleEntryBox*) entry)->emul_esd;
+ break;
case GF_ISOM_BOX_TYPE_SHC1:
case GF_ISOM_BOX_TYPE_SHV1:
+ if ((mdia->mediaTrack->extractor_mode & 0x0000FFFF) != GF_ISOM_NALU_EXTRACT_INSPECT)
+ HEVC_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*) entry, mdia);
+ else
+ HEVC_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*) entry, NULL);
esd = ((GF_MPEGVisualSampleEntryBox*) entry)->emul_esd;
break;
case GF_ISOM_BOX_TYPE_MP4A:
//The first non sync sample we see must create a syncTable
if (sample->IsRAP) {
- //insert it only if we have a sync table
- if (stbl->SyncSample) {
+ //insert it only if we have a sync table and if we have an IDR slice
+ if (stbl->SyncSample && (sample->IsRAP == 1)){
e = stbl_AddRAP(stbl->SyncSample, sampleNumber);
if (e) return e;
}
GF_TrackBox *src_trak = gf_isom_get_track_from_file(input, TrackID);
u32 boxType;
GF_SampleEncryptionBox *senc;
+ u8 IV_size;
+ u32 IsEncrypted;
if (!traf) return GF_BAD_PARAM;
sai = NULL;
+ gf_isom_get_sample_cenc_info(input, trackNum, SampleNum, &IsEncrypted, &IV_size, NULL);
e = gf_isom_cenc_get_sample_aux_info(input, trackNum, SampleNum, &sai, &boxType);
if (e) return e;
+ sai->IV_size = IV_size;
switch (boxType) {
case GF_ISOM_BOX_UUID_PSEC:
gf_list_add(senc->samp_aux_info, sai);
if (sai->subsample_count) senc->flags = 0x00000002;
- gf_isom_cenc_set_saiz_saio(senc, NULL, traf, 18+6*sai->subsample_count);
+ gf_isom_cenc_set_saiz_saio(senc, NULL, traf, IsEncrypted ? IV_size+2+6*sai->subsample_count : 0);
}
return gf_isom_add_subsample_info(traf->subs, last_sample, subSampleSize, priority, reserved, discardable);
}
-GF_Err gf_isom_fragment_copy_subsample(GF_ISOFile *dest, u32 TrackID, GF_ISOFile *orig, u32 track, u32 sampleNumber)
+GF_Err gf_isom_fragment_copy_subsample(GF_ISOFile *dest, u32 TrackID, GF_ISOFile *orig, u32 track, u32 sampleNumber, Bool sgpd_in_traf)
{
u32 i, count, last_sample;
GF_SubSampleInfoEntry *sub_sample;
first_sample_in_entry = last_sample_in_entry+1;
continue;
}
- /*found our sample, add it to trak->sampleGroups*/
+
if (!traf->sampleGroups)
traf->sampleGroups = gf_list_new();
- e = gf_isom_add_sample_group_entry(traf->sampleGroups, 0, sg->grouping_type, sg->sample_entries[j].group_description_index);
+ /*found our sample, add it to trak->sampleGroups*/
+ e = gf_isom_copy_sample_group_entry_to_traf(traf, trak->Media->information->sampleTable, sg->grouping_type, sg->sample_entries[j].group_description_index, sgpd_in_traf);
break;
}
}
GF_SttsEntry *ent;
(*DTS) = 0;
+ if (duration) {
+ *duration = 0;
+ }
if (!stts || !SampleNumber) return GF_BAD_PARAM;
ent = NULL;
} else {
stts->entries[0].sampleDelta = (u32) DTSs[1] /*- DTS[0]==0 */;
}
- for (i=0; i<stbl->SampleSize->sampleCount-1; i++) {
+ for (i=1; i<stbl->SampleSize->sampleCount-1; i++) {
if (i+1 == stbl->SampleSize->sampleCount-1) {
//and by default, our last sample has the same delta as the prev
// stts->entries[j].sampleCount++;
} else {
j++;
stts->nb_entries++;
+ if (j+1==stts->alloc_size) {
+ stts->alloc_size++;
+ stts->entries = gf_realloc(stts->entries, sizeof(GF_SttsEntry) * stts->alloc_size);
+ }
stts->entries[j].sampleCount = 1;
stts->entries[j].sampleDelta = (u32) (DTSs[i+1] - DTSs[i]);
}
stss->alloc_size = stss->nb_entries = 0;
return GF_OK;
}
- //the real pain is that we may actually not have to change anything..
- for (i=0; i<stss->nb_entries; i++) {
- if (sampleNumber == stss->sampleNumbers[i]) goto found;
- }
- //nothing to do
- return GF_OK;
-found:
- //a small opt: the sample numbers are in order...
- i++;
- for (;i<stss->nb_entries; i++) {
- stss->sampleNumbers[i-1] = stss->sampleNumbers[i];
+ for (i=0; i<stss->nb_entries; i++) {
+ //found the sample
+ if (sampleNumber == stss->sampleNumbers[i]) {
+ memmove(&stss->sampleNumbers[i], &stss->sampleNumbers[i+1], sizeof(u32)* (stss->nb_entries-i-1) );
+ stss->nb_entries--;
+ }
+
+ if (sampleNumber < stss->sampleNumbers[i]) {
+ assert(stss->sampleNumbers[i]);
+ stss->sampleNumbers[i]--;
+ }
}
- stss->nb_entries -= 1;
return GF_OK;
}
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
+/*Compares two sample-group descriptions by serializing both and comparing the bytes.
+If grouping_type is non-zero, ptr1/ptr2 are individual description entries written with
+sgpd_write_entry; if it is zero, they are full GF_SampleGroupDescriptionBox boxes written
+with sgpd_Write. Returns GF_TRUE only when the serialized forms are byte-identical.*/
+Bool gf_isom_is_identical_sgpd(void *ptr1, void *ptr2, u32 grouping_type)
+{
+	GF_BitStream *bs1, *bs2;
+	char *buf1, *buf2;
+	u32 len1, len2;
+	Bool res = GF_FALSE;
+
+	if (!ptr1 || !ptr2)
+		return GF_FALSE;
+
+	/*serialize first operand*/
+	bs1 = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
+	if (grouping_type) {
+		sgpd_write_entry(grouping_type, ptr1, bs1);
+	} else {
+		sgpd_Write((GF_Box *)ptr1, bs1);
+	}
+	gf_bs_get_content(bs1, &buf1, &len1);
+	gf_bs_del(bs1);
+
+	/*serialize second operand the same way*/
+	bs2 = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
+	if (grouping_type) {
+		sgpd_write_entry(grouping_type, ptr2, bs2);
+	} else {
+		sgpd_Write((GF_Box *)ptr2, bs2);
+	}
+	gf_bs_get_content(bs2, &buf2, &len2);
+	gf_bs_del(bs2);
+
+
+	if ((len1==len2) && !memcmp(buf1, buf2, len1))
+		res = GF_TRUE;
+
+	gf_free(buf1);
+	gf_free(buf2);
+
+	return res;
+}
+
GF_Err MergeTrack(GF_TrackBox *trak, GF_TrackFragmentBox *traf, u64 moof_offset, Bool is_first_merge)
{
u32 i, j, chunk_size;
if (traf->sampleGroups) {
GF_List *groups;
GF_List *groupDescs;
+ Bool is_identical_sgpd = GF_TRUE;
+ u32 *new_idx = NULL;
+
if (!trak->Media->information->sampleTable->sampleGroups)
trak->Media->information->sampleTable->sampleGroups = gf_list_new();
}
/*merge descriptions*/
else {
- u32 idx = gf_list_count(new_sgdesc->group_descriptions);
- for (j=idx; j<gf_list_count(sgdesc->group_descriptions); j++) {
- void *ptr = gf_list_get(sgdesc->group_descriptions, j);
- if (ptr) {
- gf_list_add(new_sgdesc->group_descriptions, ptr);
- gf_list_rem(sgdesc->group_descriptions, j);
- j--;
+ u32 count;
+
+ is_identical_sgpd = gf_isom_is_identical_sgpd(new_sgdesc, sgdesc, 0);
+ if (is_identical_sgpd)
+ continue;
+
+ new_idx = (u32 *)gf_malloc(gf_list_count(sgdesc->group_descriptions)*sizeof(u32));
+ count = 0;
+ while (gf_list_count(sgdesc->group_descriptions)) {
+ void *sgpd_entry = gf_list_get(sgdesc->group_descriptions, 0);
+ Bool new_entry = GF_TRUE;
+
+ for (j = 0; j < gf_list_count(new_sgdesc->group_descriptions); j++) {
+ void *ptr = gf_list_get(new_sgdesc->group_descriptions, j);
+ if (gf_isom_is_identical_sgpd(sgpd_entry, ptr, new_sgdesc->grouping_type)) {
+ new_idx[count] = j + 1;
+ count ++;
+ new_entry = GF_FALSE;
+ gf_free(sgpd_entry);
+ break;
+ }
+ }
+
+ if (new_entry) {
+ gf_list_add(new_sgdesc->group_descriptions, sgpd_entry);
+ new_idx[count] = gf_list_count(new_sgdesc->group_descriptions);
+ count ++;
}
+
+ gf_list_rem(sgdesc->group_descriptions, 0);
}
}
}
stbl_group->version = frag_group->version;
gf_list_add(groups, stbl_group);
}
- if (frag_group->entry_count && stbl_group->entry_count &&
- (frag_group->sample_entries[0].group_description_index==stbl_group->sample_entries[stbl_group->entry_count-1].group_description_index)
- ) {
- stbl_group->sample_entries[stbl_group->entry_count - 1].sample_count += frag_group->sample_entries[0].sample_count;
- if (frag_group->entry_count>1) {
- stbl_group->sample_entries = gf_realloc(stbl_group->sample_entries, sizeof(GF_SampleGroupEntry) * (stbl_group->entry_count + frag_group->entry_count - 1));
- memcpy(&stbl_group->sample_entries[stbl_group->entry_count], &frag_group->sample_entries[1], sizeof(GF_SampleGroupEntry) * (frag_group->entry_count - 1));
- stbl_group->entry_count += frag_group->entry_count - 1;
+
+ if (is_identical_sgpd) {
+ //adjust sgpd index: in traf index start at 0x1001
+ for (j = 0; j < frag_group->entry_count; j++)
+ frag_group->sample_entries[j].group_description_index &= 0x0FFFF;
+ if (frag_group->entry_count && stbl_group->entry_count &&
+ (frag_group->sample_entries[0].group_description_index==stbl_group->sample_entries[stbl_group->entry_count-1].group_description_index)
+ ) {
+ stbl_group->sample_entries[stbl_group->entry_count - 1].sample_count += frag_group->sample_entries[0].sample_count;
+ if (frag_group->entry_count>1) {
+ stbl_group->sample_entries = gf_realloc(stbl_group->sample_entries, sizeof(GF_SampleGroupEntry) * (stbl_group->entry_count + frag_group->entry_count - 1));
+ memcpy(&stbl_group->sample_entries[stbl_group->entry_count], &frag_group->sample_entries[1], sizeof(GF_SampleGroupEntry) * (frag_group->entry_count - 1));
+ stbl_group->entry_count += frag_group->entry_count - 1;
+ }
+ } else {
+ stbl_group->sample_entries = gf_realloc(stbl_group->sample_entries, sizeof(GF_SampleGroupEntry) * (stbl_group->entry_count + frag_group->entry_count));
+ memcpy(&stbl_group->sample_entries[stbl_group->entry_count], &frag_group->sample_entries[0], sizeof(GF_SampleGroupEntry) * frag_group->entry_count);
+ stbl_group->entry_count += frag_group->entry_count;
}
} else {
stbl_group->sample_entries = gf_realloc(stbl_group->sample_entries, sizeof(GF_SampleGroupEntry) * (stbl_group->entry_count + frag_group->entry_count));
+ //adjust sgpd index
+ for (j = 0; j < frag_group->entry_count; j++)
+ frag_group->sample_entries[j].group_description_index = new_idx[j];
memcpy(&stbl_group->sample_entries[stbl_group->entry_count], &frag_group->sample_entries[0], sizeof(GF_SampleGroupEntry) * frag_group->entry_count);
stbl_group->entry_count += frag_group->entry_count;
}
}
+
+ if (new_idx) gf_free(new_idx);
}
if (gf_isom_is_cenc_media(trak->moov->mov, gf_isom_get_tracknum_from_id(trak->moov, trak->Header->trackID), 1)) {
/*cur_position = gf_bs_get_position(trak->moov->mov->movieFileMap->bs);
gf_bs_seek(trak->moov->mov->movieFileMap->bs, offset);
- buffer = (char *)malloc(size);
+ buffer = (char *)gf_malloc(size);
gf_bs_read_data(trak->moov->mov->movieFileMap->bs, buffer, size);
gf_bs_seek(trak->moov->mov->movieFileMap->bs, cur_position);
/* CBC MODE */
-static GF_Err _init_mcrypt( CBC_BUFFER* buf,void *key, int lenofkey, void *IV, int size)
+static GF_Err _init_mcrypt( void* _buf,void *key, int lenofkey, void *IV, int size)
{
+ CBC_BUFFER* buf = (CBC_BUFFER* )_buf;
/* For cbc */
buf->previous_ciphertext =
buf->previous_cipher = NULL;
return GF_OUT_OF_MEM;
}
-static GF_Err _mcrypt_set_state( CBC_BUFFER* buf, void *IV, int size)
+/*Resets the CBC chaining state to the given IV. Takes void* so the function can be
+stored in the generic mode-dispatch table shared by all cipher modes.*/
+static GF_Err _mcrypt_set_state( void* _buf, void *IV, int size)
{
	/* For cbc */
+	CBC_BUFFER* buf = (CBC_BUFFER* )_buf;
	memcpy(buf->previous_ciphertext, IV, size);
	memcpy(buf->previous_cipher, IV, size);
	return GF_OK;
}
-static GF_Err _mcrypt_get_state( CBC_BUFFER* buf, void *IV, int *size)
+static GF_Err _mcrypt_get_state( void* _buf, void *IV, int *size)
{
+ CBC_BUFFER* buf = (CBC_BUFFER* )_buf;
if (*size < buf->blocksize) {
*size = buf->blocksize;
return GF_BAD_PARAM;
}
-static void _end_mcrypt( CBC_BUFFER* buf) {
+/*Frees the CBC chaining buffers allocated at init. void* signature matches the generic
+mode-dispatch table.*/
+static void _end_mcrypt( void* _buf) {
+	CBC_BUFFER* buf = (CBC_BUFFER* )_buf;
	gf_free(buf->previous_ciphertext);
	gf_free(buf->previous_cipher);
}
-static GF_Err _mcrypt( CBC_BUFFER* buf, void *plaintext, int len, int blocksize, void* akey, void (*func)(void*,void*), void (*func2)(void*,void*))
+static GF_Err _mcrypt( void* _buf, void *plaintext, int len, int blocksize, void* akey, void (*func)(void*,void*), void (*func2)(void*,void*))
{
+ CBC_BUFFER* buf = (CBC_BUFFER* )_buf;
u32 *fplain = plaintext;
u32 *plain;
int dblock, dlen, i, j;
-static GF_Err _mdecrypt( CBC_BUFFER* buf, void *ciphertext, int len, int blocksize,void* akey, void (*func)(void*,void*), void (*func2)(void*,void*))
+static GF_Err _mdecrypt( void* _buf, void *ciphertext, int len, int blocksize,void* akey, void (*func)(void*,void*), void (*func2)(void*,void*))
{
+ CBC_BUFFER* buf = (CBC_BUFFER* )_buf;
u32 *cipher;
u32 *fcipher = ciphertext;
int i, j, dlen, dblock;
}
}
-void bit_rate_pic_rate_info(GF_BitStream *bs, u8 level_low, u8 level_high, HEVC_VPS *vps)
+static u32 scalability_type_to_idx(HEVC_VPS *vps, u32 scalability_type)
{
- u8 i;
- for (i=level_low; i<=level_high; i++) {
- Bool bit_rate_info_present_flag = gf_bs_read_int(bs, 1);
- Bool pic_rate_info_present_flag = gf_bs_read_int(bs, 1);
- if (bit_rate_info_present_flag) {
- vps->rates[i].avg_bit_rate = gf_bs_read_int(bs, 16);
- vps->rates[i].max_bit_rate = gf_bs_read_int(bs, 16);
+ u32 idx = 0, type;
+ for (type=0; type < scalability_type; type++) {
+ idx += (vps->scalability_mask[type] ? 1 : 0 );
+ }
+ return idx;
+}
+
+#define SHVC_VIEW_ORDER_INDEX 1
+#define SHVC_SCALABILITY_INDEX 2
+
+/*Returns the dimension_id value of the given scalability type for the layer at
+layer_id_in_vps, or 0 when that scalability type is not signalled in the VPS mask.*/
+static u32 shvc_get_scalability_id(HEVC_VPS *vps, u32 layer_id_in_vps, u32 scalability_type )
+{
+	u32 idx;
+	if (!vps->scalability_mask[scalability_type]) return 0;
+	idx = scalability_type_to_idx(vps, scalability_type);
+	return vps->dimension_id[layer_id_in_vps][idx];
+}
+
+/*Returns the view order index of the layer with nuh_layer_id 'id', by mapping the
+nuh id to its VPS layer index and reading the view-order scalability dimension.*/
+static u32 shvc_get_view_index(HEVC_VPS *vps, u32 id)
+{
+	return shvc_get_scalability_id(vps, vps->layer_id_in_vps[id], SHVC_VIEW_ORDER_INDEX);
+}
+
+/*Counts the number of distinct views signalled in the VPS: starts at 1 and increments
+whenever a layer's view index differs from the previous layer's view index.
+NOTE(review): layer i is compared against index i-1 passed directly as a VPS layer index
+(not via layer_id_in_nuh) - presumably intentional since the loop iterates VPS layer
+indices, but confirm against the SHVC reference decoder.*/
+static u32 shvc_get_num_views(HEVC_VPS *vps)
+{
+	u32 numViews = 1, i;
+	for (i=0; i<vps->max_layers; i++ ) {
+		u32 layer_id = vps->layer_id_in_nuh[i];
+		if (i>0 && ( shvc_get_view_index( vps, layer_id) != shvc_get_scalability_id( vps, i-1, SHVC_VIEW_ORDER_INDEX) )) {
+			numViews++;
+		}
+	}
+	return numViews;
+}
+
+/*Parses one rep_format() structure from the VPS extension: luma picture dimensions,
+and optionally chroma format and bit depths when chroma_bitdepth_present_flag is set.
+NOTE(review): when the flag is 0, chroma_format_idc/bit depths are left untouched in
+fmt - callers presumably rely on the struct being pre-zeroed; confirm.*/
+static void shvc_parse_rep_format(HEVC_RepFormat *fmt, GF_BitStream *bs)
+{
+	u8 chroma_bitdepth_present_flag = gf_bs_read_int(bs, 1);
+	fmt->pic_width_luma_samples = gf_bs_read_int(bs, 16);
+	fmt->pic_height_luma_samples = gf_bs_read_int(bs, 16);
+	if (chroma_bitdepth_present_flag) {
+		fmt->chroma_format_idc = gf_bs_read_int(bs, 2);
+
+		/*idc 3 (4:4:4) may carry separate colour planes*/
+		if (fmt->chroma_format_idc == 3)
+			fmt->separate_colour_plane_flag = gf_bs_read_int(bs, 1);
+		fmt->bit_depth_luma = 1 + gf_bs_read_int(bs, 4);
+		fmt->bit_depth_chroma = 1 + gf_bs_read_int(bs, 4);
+	}
+}
+
+/*Parses the vps_extension() syntax (layered HEVC / SHVC): scalability masks and
+dimension ids, layer id mappings, view ids, direct-dependency flags, per-output-layer-set
+profile/tier/level, and rep_format() entries. Fields not used by GPAC are read and
+discarded (see the inline commented-out assignments). The exact read order mirrors the
+bitstream syntax and must not be changed.*/
+static void hevc_parse_vps_extension(HEVC_VPS *vps, GF_BitStream *bs)
+{
+	u8 splitting_flag, vps_nuh_layer_id_present_flag, view_id_len;
+	u32 i, j, NumScalabilityTypes, num_profile_tier_level, num_add_output_layer_sets, NumOutputLayerSets;
+	u8 dimension_id_len[62];
+	u8 direct_dependency_flag[62][62];
+	u8 /*avc_base_layer_flag, */vps_number_layer_sets, /*default_one_target_output_layer_flag, */rep_format_idx_present_flag;
+
+	/*avc_base_layer_flag = */gf_bs_read_int(bs, 1);
+	splitting_flag = gf_bs_read_int(bs, 1);
+	NumScalabilityTypes =0;
+	for (i=0; i<16; i++) {
+		vps->scalability_mask[i] = gf_bs_read_int(bs, 1);
+		NumScalabilityTypes += vps->scalability_mask[i];
+	}
+	dimension_id_len[0] = 0;
+	for (i=0; i<(NumScalabilityTypes - splitting_flag); i++) {
+		dimension_id_len[i] = 1 + gf_bs_read_int(bs, 3);
+	}
+
+	/*layer 0 is always the base layer*/
+	vps->layer_id_in_nuh[0] = 0;
+	vps->layer_id_in_vps[0] = 0;
+	vps_nuh_layer_id_present_flag = gf_bs_read_int(bs, 1);
+	for (i=1; i<vps->max_layers; i++) {
+		if (vps_nuh_layer_id_present_flag) {
+			vps->layer_id_in_nuh[i] = gf_bs_read_int(bs, 6);
+		} else {
+			vps->layer_id_in_nuh[i] = i;
+		}
+		vps->layer_id_in_vps[vps->layer_id_in_nuh[i]] = i;
+
+		if (!splitting_flag) {
+			for (j=0; j<NumScalabilityTypes; j++) {
+				vps->dimension_id[i][j] = gf_bs_read_int(bs, dimension_id_len[j]);
+			}
+		}
+	}
+
+	view_id_len = gf_bs_read_int(bs, 4);
+	for( i = 0; i < shvc_get_num_views(vps); i++ ){
+		/*m_viewIdVal[i] = */ gf_bs_read_int(bs, view_id_len + 1);
+	}
+
+	for (i=1; i<vps->max_layers; i++) {
+		for (j=0; j<i; j++) {
+			direct_dependency_flag[i][j] = gf_bs_read_int(bs, 1);
+		}
+	}
+
+	if (/*vps_sub_layers_max_minus1_present_flag*/gf_bs_read_int(bs, 1)) {
+		for (i=0; i < vps->max_layers - 1; i++) {
+			/*sub_layers_vps_max_minus1[ i ]*/gf_bs_read_int(bs, 3);
+		}
+	}
+
+	if (/*max_tid_ref_present_flag = */gf_bs_read_int(bs, 1)) {
+		for (i=0; i<vps->max_layers ; i++) {
+			for (j= i+1; j < vps->max_layers; j++) {
+				if (direct_dependency_flag[j][i])
+					/*max_tid_il_ref_pics_plus1[ i ][ j ]*/gf_bs_read_int(bs, 3);
+			}
		}
-		if (pic_rate_info_present_flag) {
-			vps->rates[i].constand_pic_rate_idc = gf_bs_read_int(bs, 2);
-			vps->rates[i].avg_pic_rate = gf_bs_read_int(bs, 16);
+	}
+	/*all_ref_layers_active_flag*/gf_bs_read_int(bs, 1);
+
+	vps_number_layer_sets = 1+gf_bs_read_int(bs, 10);
+	num_profile_tier_level = 1+gf_bs_read_int(bs, 6);
+
+	/*NOTE(review): no bounds check of num_profile_tier_level against the ext_ptl[]
+	capacity - a hostile bitstream could overflow; confirm array size in HEVC_VPS*/
+	for (i=1; i < num_profile_tier_level; i++) {
+		Bool vps_profile_present_flag = gf_bs_read_int(bs, 1);
+		if (!vps_profile_present_flag) {
+			/*vps->profile_ref[i] = */gf_bs_read_int(bs, 6);
		}
+		profile_tier_level(bs, vps_profile_present_flag, vps->max_sub_layers-1, &vps->ext_ptl[i-1] );
+	}
+	NumOutputLayerSets = vps_number_layer_sets;
+	if (/*more_output_layer_sets_than_default_flag */gf_bs_read_int(bs, 1)) {
+		num_add_output_layer_sets = gf_bs_read_int(bs, 10)+1;
+		NumOutputLayerSets += num_add_output_layer_sets;
	}
+
+	/*default_one_target_output_layer_flag = 0;*/
+	if (NumOutputLayerSets > 1) {
+		/*default_one_target_output_layer_flag = */gf_bs_read_int(bs, 1);
+	}
+	vps->profile_level_tier_idx[0] = 0;
+	for (i=1; i<NumOutputLayerSets; i++) {
+		u32 nb_bits;
+		if( i > vps->num_layer_sets - 1) {
+			GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] VPS Extensions: not supported number of layers\n"));
+		}
+		/*ptl index is coded on ceil(log2(num_profile_tier_level)) bits*/
+		nb_bits = 1;
+		while ((u32) (1 << nb_bits) < num_profile_tier_level) {
+			nb_bits++;
+		}
+		vps->profile_level_tier_idx[i] = gf_bs_read_int(bs, nb_bits);
+	}
+
+	if (vps->max_layers - 1 > 0 )
+		/*alt_output_layer_flag*/gf_bs_read_int(bs, 1);
+
+	rep_format_idx_present_flag = gf_bs_read_int(bs, 1);
+	if (rep_format_idx_present_flag ) {
+		vps->num_rep_formats = 1 + gf_bs_read_int(bs, 8);
+	} else {
+		vps->num_rep_formats = vps->max_layers;
+	}
+	for (i=0; i<vps->num_rep_formats; i++) {
+		shvc_parse_rep_format(&vps->rep_formats[i], bs);
+	}
+	vps->rep_format_idx[0] = 0;
+	for (i=1; i<vps->max_layers; i++) {
+		if (rep_format_idx_present_flag) {
+			if (vps->num_rep_formats > 1) {
+				vps->rep_format_idx[i] = gf_bs_read_int(bs, 8);
+			} else {
+				vps->rep_format_idx[i] = 0;
+			}
+		} else {
+			vps->rep_format_idx[i] = i;
+		}
+	}
+	//TODO - we don't use the rest ...
+
}
+GF_EXPORT
s32 gf_media_hevc_read_vps(char *data, u32 size, HEVCState *hevc)
{
GF_BitStream *bs;
+ u8 vps_sub_layer_ordering_info_present_flag, vps_extension_flag;
char *data_without_emulation_bytes = NULL;
u32 data_without_emulation_bytes_size = 0;
+ u32 i, j;
s32 vps_id = -1;
HEVC_VPS *vps;
if (vps_id>=16) goto exit;
- vps = &hevc->vps[vps_id];
+ vps = &hevc->vps[vps_id];
if (!vps->state) {
vps->id = vps_id;
vps->state = 1;
}
/* vps_reserved_three_2bits = */ gf_bs_read_int(bs, 2);
- /* vps_reserved_zero_6bits = */ gf_bs_read_int(bs, 6);
- vps->max_sub_layer = gf_bs_read_int(bs, 3) + 1;
+ vps->max_layers = 1 + gf_bs_read_int(bs, 6);
+ vps->max_sub_layers = gf_bs_read_int(bs, 3) + 1;
vps->temporal_id_nesting = gf_bs_read_int(bs, 1);
/* vps_reserved_ffff_16bits = */ gf_bs_read_int(bs, 16);
- profile_tier_level(bs, 1, vps->max_sub_layer-1, &vps->ptl);
- bit_rate_pic_rate_info(bs, 0, vps->max_sub_layer-1, vps);
-
+ profile_tier_level(bs, 1, vps->max_sub_layers-1, &vps->ptl);
+
+ vps_sub_layer_ordering_info_present_flag = gf_bs_read_int(bs, 1);
+ for (i=(vps_sub_layer_ordering_info_present_flag ? 0 : vps->max_sub_layers - 1); i < vps->max_sub_layers; i++) {
+ /*vps_max_dec_pic_buffering_minus1[i] = */bs_get_ue(bs);
+ /*vps_max_num_reorder_pics[i] = */bs_get_ue(bs);
+ /*vps_max_latency_increase_plus1[i] = */bs_get_ue(bs);
+ }
+ vps->max_layer_id = gf_bs_read_int(bs, 6);
+ vps->num_layer_sets = bs_get_ue(bs) + 1;
+ for (i=1; i < vps->num_layer_sets; i++) {
+ for (j=0; j <= vps->max_layer_id; j++) {
+ /*layer_id_included_flag[ i ][ j ]*/gf_bs_read_int(bs, 1);
+ }
+ }
+ if (/*vps_timing_info_present_flag*/gf_bs_read_int(bs, 1)) {
+ u32 vps_num_hrd_parameters;
+ /*u32 vps_num_units_in_tick = */gf_bs_read_int(bs, 32);
+ /*u32 vps_time_scale = */gf_bs_read_int(bs, 32);
+ if (/*vps_poc_proportional_to_timing_flag*/gf_bs_read_int(bs, 1)) {
+ /*vps_num_ticks_poc_diff_one_minus1*/bs_get_ue(bs);
+ }
+ vps_num_hrd_parameters = bs_get_ue(bs);
+ for( i = 0; i < vps_num_hrd_parameters; i++ ) {
+ //Bool cprms_present_flag=1;
+ /*hrd_layer_set_idx[ i ] = */bs_get_ue(bs);
+ if (i>0)
+ /*cprms_present_flag = */gf_bs_read_int(bs, 1) ;
+ // hevc_parse_hrd_parameters(cprms_present_flag, vps->max_sub_layers - 1);
+ }
+ }
+ vps_extension_flag = gf_bs_read_int(bs, 1);
+ if (vps_extension_flag ) {
+ gf_bs_align(bs);
+ hevc_parse_vps_extension(vps, bs);
+ vps_extension_flag = gf_bs_read_int(bs, 1);
+ }
- //and we don't care about the rest for now
exit:
gf_bs_del(bs);
gf_free(data_without_emulation_bytes);
char *data_without_emulation_bytes = NULL;
u32 data_without_emulation_bytes_size = 0;
s32 vps_id, sps_id = -1;
- u8 max_sub_layers_minus1, flag;
- u8 layer_id/*, temporal_id*/;
- Bool update_rep_format_flag;
+ u8 max_sub_layers_minus1, update_rep_format_flag, flag;
+ u8 layer_id/*, temporal_id, sps_rep_format_idx*/;
+ Bool scaling_list_enable_flag;
u32 i, nb_CTUs, depth;
u32 log2_diff_max_min_luma_coding_block_size;
u32 log2_min_transform_block_size, log2_min_luma_coding_block_size;
-
Bool sps_sub_layer_ordering_info_present_flag;
HEVC_SPS *sps;
+ HEVC_VPS *vps;
HEVC_ProfileTierLevel ptl;
if (vui_flag_pos) *vui_flag_pos = 0;
data_without_emulation_bytes = gf_malloc(size*sizeof(char));
data_without_emulation_bytes_size = avc_remove_emulation_bytes(data, data_without_emulation_bytes, size);
bs = gf_bs_new(data_without_emulation_bytes, data_without_emulation_bytes_size, GF_BITSTREAM_READ);
-// bs = gf_bs_new(data, size, GF_BITSTREAM_READ);
if (!bs) goto exit;
gf_bs_read_int(bs, 7);
}
memset(&ptl, 0, sizeof(ptl));
-
- //fixme with latest shvc syntax !!
-// if (layer_id == 0)
- {
+ max_sub_layers_minus1 = 0;
+ if (layer_id == 0) {
max_sub_layers_minus1 = gf_bs_read_int(bs, 3);
/*temporal_id_nesting_flag = */gf_bs_read_int(bs, 1);
profile_tier_level(bs, 1, max_sub_layers_minus1, &ptl);
sps_id = -1;
goto exit;
}
- //fixme with latest shvc syntax !!
- if (layer_id) sps_id=1;
sps = &hevc->sps[sps_id];
if (!sps->state) {
sps->vps_id = vps_id;
}
sps->ptl = ptl;
+ vps = &hevc->vps[vps_id];
+ //sps_rep_format_idx = 0;
+ update_rep_format_flag = 0;
if (layer_id > 0) {
-// update_rep_format_flag = gf_bs_read_int(bs, 1);
- update_rep_format_flag = 1;
+ update_rep_format_flag = gf_bs_read_int(bs, 1);
+ if (update_rep_format_flag) {
+ sps->rep_format_idx = gf_bs_read_int(bs, 8);
+ } else {
+ sps->rep_format_idx = vps->rep_format_idx[layer_id];
+ }
+ sps->width = vps->rep_formats[sps->rep_format_idx].pic_width_luma_samples;
+ sps->height = vps->rep_formats[sps->rep_format_idx].pic_height_luma_samples;
+ sps->chroma_format_idc = vps->rep_formats[sps->rep_format_idx].chroma_format_idc;
+ sps->bit_depth_luma = vps->rep_formats[sps->rep_format_idx].bit_depth_luma;
+ sps->bit_depth_chroma = vps->rep_formats[sps->rep_format_idx].bit_depth_chroma;
+ sps->separate_colour_plane_flag = vps->rep_formats[sps->rep_format_idx].separate_colour_plane_flag;
+
+ //TODO this is crude ...
+ sps->ptl = vps->ext_ptl[0];
} else {
- update_rep_format_flag = 1;
- }
- if (update_rep_format_flag) {
sps->chroma_format_idc = bs_get_ue(bs);
if (sps->chroma_format_idc==3)
sps->separate_colour_plane_flag = gf_bs_read_int(bs, 1);
sps->cw_top = bs_get_ue(bs);
sps->cw_bottom = bs_get_ue(bs);
}
- if (update_rep_format_flag) {
+ if (layer_id == 0) {
sps->bit_depth_luma = 8 + bs_get_ue(bs);
sps->bit_depth_chroma = 8 + bs_get_ue(bs);
}
sps->log2_max_pic_order_cnt_lsb = 4 + bs_get_ue(bs);
- sps_sub_layer_ordering_info_present_flag = gf_bs_read_int(bs, 1);
- for(i=sps_sub_layer_ordering_info_present_flag ? 0 : max_sub_layers_minus1; i<=max_sub_layers_minus1; i++) {
- /*max_dec_pic_buffering = */ bs_get_ue(bs);
- /*num_reorder_pics = */ bs_get_ue(bs);
- /*max_latency_increase = */ bs_get_ue(bs);
+ if (layer_id == 0) {
+ sps_sub_layer_ordering_info_present_flag = gf_bs_read_int(bs, 1);
+ for(i=sps_sub_layer_ordering_info_present_flag ? 0 : max_sub_layers_minus1; i<=max_sub_layers_minus1; i++) {
+ /*max_dec_pic_buffering = */ bs_get_ue(bs);
+ /*num_reorder_pics = */ bs_get_ue(bs);
+ /*max_latency_increase = */ bs_get_ue(bs);
+ }
}
log2_min_luma_coding_block_size = 3 + bs_get_ue(bs);
sps->bitsSliceSegmentAddress++;
}
- if (/*scaling_list_enable_flag = */ gf_bs_read_int(bs, 1)) {
- if (/*sps_scaling_list_data_present_flag=*/gf_bs_read_int(bs, 1) ) {
- //scaling_list_data( )
+ scaling_list_enable_flag = gf_bs_read_int(bs, 1);
+ if (scaling_list_enable_flag) {
+ Bool sps_infer_scaling_list_flag = 0;
+ /*u8 sps_scaling_list_ref_layer_id = 0;*/
+ if (layer_id>0) {
+ sps_infer_scaling_list_flag = gf_bs_read_int(bs, 1);
+ }
+
+ if (sps_infer_scaling_list_flag) {
+ /*sps_scaling_list_ref_layer_id = */gf_bs_read_int(bs, 6);
+ } else {
+ if (/*sps_scaling_list_data_present_flag=*/gf_bs_read_int(bs, 1) ) {
+ //scaling_list_data( )
+ }
}
}
/*asymmetric_motion_partitions_enabled_flag= */ gf_bs_read_int(bs, 1);
}
GF_EXPORT
-GF_Err gf_hevc_get_sps_info(char *sps_data, u32 sps_size, u32 *sps_id, u32 *width, u32 *height, s32 *par_n, s32 *par_d)
+/*Parses an SPS NALU into the caller-provided HEVCState (so previously parsed VPS data can
+be reused for layered streams) and reports id, dimensions and pixel aspect ratio.
+Output pointers may be NULL; par_n/par_d are set to -1 when no aspect ratio is signalled.*/
+GF_Err gf_hevc_get_sps_info_with_state(HEVCState *hevc, char *sps_data, u32 sps_size, u32 *sps_id, u32 *width, u32 *height, s32 *par_n, s32 *par_d)
{
-	HEVCState hevc;
	s32 idx;
-	memset(&hevc, 0, sizeof(HEVCState));
-	hevc.sps_active_idx = -1;
-
-	idx = gf_media_hevc_read_sps(sps_data, sps_size, &hevc);
+	idx = gf_media_hevc_read_sps(sps_data, sps_size, hevc);
	if (idx<0) {
		return GF_NON_COMPLIANT_BITSTREAM;
	}
	if (sps_id) *sps_id = idx;
-	if (width) *width = hevc.sps[idx].width;
-	if (height) *height = hevc.sps[idx].height;
-	if (par_n) *par_n = hevc.sps[idx].aspect_ratio_info_present_flag ? hevc.sps[idx].sar_width : (u32) -1;
-	if (par_d) *par_d = hevc.sps[idx].aspect_ratio_info_present_flag ? hevc.sps[idx].sar_height : (u32) -1;
+	if (width) *width = hevc->sps[idx].width;
+	if (height) *height = hevc->sps[idx].height;
+	if (par_n) *par_n = hevc->sps[idx].aspect_ratio_info_present_flag ? hevc->sps[idx].sar_width : (u32) -1;
+	if (par_d) *par_d = hevc->sps[idx].aspect_ratio_info_present_flag ? hevc->sps[idx].sar_height : (u32) -1;
	return GF_OK;
}
+GF_EXPORT
+/*Stateless convenience wrapper: parses the SPS with a fresh zero-initialized HEVCState.
+Use gf_hevc_get_sps_info_with_state when VPS context from earlier NALUs is needed.*/
+GF_Err gf_hevc_get_sps_info(char *sps_data, u32 sps_size, u32 *sps_id, u32 *width, u32 *height, s32 *par_n, s32 *par_d)
+{
+	HEVCState hevc;
+	memset(&hevc, 0, sizeof(HEVCState));
+	hevc.sps_active_idx = -1;
+	return gf_hevc_get_sps_info_with_state(&hevc, sps_data, sps_size, sps_id, width, height, par_n, par_d);
+}
+
+
#endif //GPAC_DISABLE_HEVC
static u32 AC3_FindSyncCode(u8 *buf, u32 buflen)
/*set to 1 if you want MPD to use SegmentTemplate if possible instead of SegmentList*/
#define M3U8_TO_MPD_USE_TEMPLATE 0
-/*uncomment to only play the first adaptation set*/
-//#define DEBUG_FIRST_SET_ONLY
-/*uncomment to play all but the first adaptation set*/
-//#define DEBUG_SKIP_FIRST_SET
-
typedef enum {
GF_DASH_STATE_STOPPED = 0,
/*period setup and playback chain creation*/
char *base_url;
u32 max_cache_duration, max_width, max_height;
+ u8 max_bit_per_pixel;
u32 auto_switch_count;
Bool keep_files, disable_switching, allow_local_mpd_update, enable_buffering, estimate_utc_drift;
Bool is_m3u8;
Bool force_mpd_update;
+ u32 user_buffer_ms;
+
u32 min_timeout_between_404, segment_lost_after_ms;
+
+ s32 debug_group_index;
};
static void gf_dash_seek_group(GF_DashClient *dash, GF_DASH_Group *group);
/* maximum representation index we want to download*/
u32 force_max_rep_index;
+ //start time of currently downloaded segment - for now only used for merging SegmentTimeline, but we should use this to resync across representations ...
+ u64 current_start_time;
+ u32 current_timescale;
void *udta;
};
return NULL;
}
-static void gf_dash_buffer_off(GF_DASH_Group *group, GF_DashClient *dash)
+/*Marks the group as done buffering; when it was the last buffering group, notifies the
+application with GF_DASH_EVENT_BUFFER_DONE. The client is now reached via group->dash
+instead of being passed explicitly.*/
+static void gf_dash_buffer_off(GF_DASH_Group *group)
{
-	if (!dash->enable_buffering) return;
+	if (!group->dash->enable_buffering) return;
	if (group->buffering) {
-		assert(dash->nb_buffering);
-		dash->nb_buffering--;
-		if (!dash->nb_buffering) {
-			dash->dash_io->on_dash_event(dash->dash_io, GF_DASH_EVENT_BUFFER_DONE, -1, GF_OK);
+		assert(group->dash->nb_buffering);
+		group->dash->nb_buffering--;
+		if (!group->dash->nb_buffering) {
+			group->dash->dash_io->on_dash_event(group->dash->dash_io, GF_DASH_EVENT_BUFFER_DONE, -1, GF_OK);
			GF_LOG(GF_LOG_INFO, GF_LOG_DASH, ("[DASH] Session buffering done\n"));
		}
		group->buffering = 0;
	}
}
-static void gf_dash_buffer_on(GF_DASH_Group *group, GF_DashClient *dash)
+/*Marks the group as buffering and bumps the client-wide buffering counter.
+NOTE(review): the previous version also required group->selection==GF_DASH_GROUP_SELECTED;
+that guard is dropped here - presumably callers now only invoke this on selected groups;
+confirm at call sites.*/
+static void gf_dash_buffer_on(GF_DASH_Group *group)
{
-	if (!dash->enable_buffering) return;
-	if ((group->selection==GF_DASH_GROUP_SELECTED) && !group->buffering) {
-		if (!dash->nb_buffering) {
+	if (!group->dash->enable_buffering) return;
+
+	if (!group->buffering) {
+		if (!group->dash->nb_buffering) {
			GF_LOG(GF_LOG_INFO, GF_LOG_DASH, ("[DASH] Starting session buffering\n"));
		}
-		dash->nb_buffering++;
+		group->dash->nb_buffering++;
		group->buffering = 1;
	}
}
dash->dash_io->on_dash_event(dash->dash_io, GF_DASH_EVENT_BUFFERING, -1, GF_OK);
if (group->cached[0].duration && group->nb_cached_segments>=group->max_buffer_segments)
- gf_dash_buffer_off(group, dash);
+ gf_dash_buffer_off(group);
}
}
GF_MPD_SegmentTimeline *timeline = NULL;
GF_MPD_Representation *rep = NULL;
u32 shift, timescale;
- u64 current_time, availabilityStartTime;
+ u64 current_time, current_time_no_timeshift, availabilityStartTime;
u32 ast_diff, start_number;
Double ast_offset = 0;
u64 utc;
sscanf(val, LLU, &utc);
group->dash->utc_drift_estimate = (s32) ((s64) fetch_time - (s64) utc);
- GF_LOG(GF_LOG_INFO, GF_LOG_DASH, ("[DASH] Estimated UTC diff between client and server %d ms\n", group->dash->utc_drift_estimate));
+ GF_LOG(GF_LOG_INFO, GF_LOG_DASH, ("[DASH] Estimated UTC diff between client and server %d ms (UTC fetch "LLU" - server UTC "LLU" - MPD AST "LLU" - MPD PublishTime "LLU"\n", group->dash->utc_drift_estimate, fetch_time, utc, group->dash->mpd->availabilityStartTime, group->dash->mpd->publishTime));
} else {
val = group->dash->dash_io->get_header_value(group->dash->dash_io, group->dash->mpd_dnload, "Date");
if (val) {
u64 utc = gf_net_parse_date(val);
if (utc) {
group->dash->utc_drift_estimate = (s32) ((s64) fetch_time - (s64) utc);
- GF_LOG(GF_LOG_INFO, GF_LOG_DASH, ("[DASH] Estimated UTC diff between client and server %d ms\n", group->dash->utc_drift_estimate));
+ GF_LOG(GF_LOG_INFO, GF_LOG_DASH, ("[DASH] Estimated UTC diff between client and server %d ms (UTC fetch "LLU" - server UTC "LLU" - MPD AST "LLU" - MPD PublishTime "LLU"\n", group->dash->utc_drift_estimate, fetch_time, utc, group->dash->mpd->availabilityStartTime, group->dash->mpd->publishTime));
}
}
}
}
#endif
+ current_time_no_timeshift = current_time;
if ( ((s32) mpd->time_shift_buffer_depth>=0)) {
- shift = mpd->time_shift_buffer_depth;
- shift *= group->dash->initial_time_shift_percent;
- shift /= 100;
- if (current_time < shift) current_time = 0;
- else current_time -= shift;
+ if (group->dash->initial_time_shift_percent) {
+ shift = mpd->time_shift_buffer_depth;
+ shift *= group->dash->initial_time_shift_percent;
+ shift /= 100;
+
+ if (current_time < shift) current_time = 0;
+ else current_time -= shift;
+ } else if (group->dash->user_buffer_ms) {
+ shift = MIN(group->dash->user_buffer_ms, mpd->time_shift_buffer_depth);
+
+ if (current_time < shift) current_time = 0;
+ else current_time -= shift;
+ }
}
timeline = NULL;
if (timeline) {
u64 start_segtime = 0;
u64 segtime = 0;
+ u64 current_time_rescale;
+ u64 timeline_duration = 0;
+ u32 count;
u32 i, seg_idx = 0;
- current_time /= 1000;
- current_time *= timescale;
- for (i=0; i<gf_list_count(timeline->entries); i++) {
+
+ current_time_rescale = current_time;
+ current_time_rescale /= 1000;
+ current_time_rescale *= timescale;
+
+ count = gf_list_count(timeline->entries);
+ for (i=0; i<count; i++) {
+ GF_MPD_SegmentTimelineEntry *ent = gf_list_get(timeline->entries, i);
+
+ if (!i && (current_time_rescale + ent->duration < ent->start_time)) {
+ current_time_rescale = current_time_no_timeshift * timescale / 1000;
+ }
+ timeline_duration += (1+ent->repeat_count)*ent->duration;
+
+ if (i+1 == count) timeline_duration -= ent->duration;
+ }
+
+ for (i=0; i<count; i++) {
u32 repeat;
GF_MPD_SegmentTimelineEntry *ent = gf_list_get(timeline->entries, i);
- if (!segtime) start_segtime = segtime = ent->start_time;
+ if (!segtime) {
+ start_segtime = segtime = ent->start_time;
+
+ //if current time is before the start of the previous segment, consider our timing is broken
+ if (current_time_rescale + ent->duration < segtime) {
+ GF_LOG(GF_LOG_WARNING, GF_LOG_DASH, ("[DASH] current time "LLU" is before start time "LLU" of first segment in timeline (timescale %d) by %g sec - using first segment as starting point\n", current_time_rescale, segtime, timescale, (segtime-current_time_rescale)*1.0/timescale));
+ group->download_segment_index = seg_idx;
+ group->nb_segments_in_rep = count;
+ group->start_playback_range = (segtime)*1.0/timescale;
+ group->ast_at_init = availabilityStartTime - (u32) (ast_offset*1000);
+ group->broken_timing = 1;
+ return;
+ }
+ }
repeat = 1+ent->repeat_count;
while (repeat) {
- if ((current_time >= segtime) && (current_time < segtime + ent->duration)) {
+ if ((current_time_rescale >= segtime) && (current_time_rescale < segtime + ent->duration)) {
+ GF_LOG(GF_LOG_INFO, GF_LOG_DASH, ("[DASH] Found segment %d for current time "LLU" is in SegmentTimeline ["LLU"-"LLU"] (timecale %d - current index %d)\n", seg_idx, current_time_rescale, start_segtime, segtime + ent->duration, timescale, group->download_segment_index));
+
group->download_segment_index = seg_idx;
- group->nb_segments_in_rep = seg_idx + 10;
-
- GF_LOG(GF_LOG_INFO, GF_LOG_DASH, ("[DASH] Found segment %d for current time "LLU" is in SegmentTimeline ["LLU"-"LLU"] - cannot estimate current startNumber, default to 0 ...\n", current_time, segtime, segtime + ent->duration));
+ group->nb_segments_in_rep = seg_idx + count - i;
+ group->start_playback_range = (current_time)/1000.0;
+ group->ast_at_init = availabilityStartTime - (u32) (ast_offset*1000);
- group->start_playback_range = (current_time )/1000.0;
+ //to remove - this is a hack to speedup starting for some strange MPDs which announce the live point as the first segment but have already produced the complete timeline
+ if (group->dash->utc_drift_estimate<0) {
+ group->ast_at_init -= (timeline_duration - (segtime-start_segtime)) *1000/timescale;
+ }
return;
}
segtime += ent->duration;
}
}
//NOT FOUND !!
- GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("[DASH] current time "LLU" is NOT in SegmentTimeline ["LLU"-"LLU"] - cannot estimate current startNumber, default to 0 ...\n", current_time, start_segtime, segtime));
+ GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("[DASH] current time "LLU" is NOT in SegmentTimeline ["LLU"-"LLU"] - cannot estimate current startNumber, default to 0 ...\n", current_time_rescale, start_segtime, segtime));
group->download_segment_index = 0;
group->nb_segments_in_rep = 10;
group->broken_timing = 1;
if (! *sess) {
*sess = dash_io->create(dash_io, persistent_mode ? 1 : 0, url, group_idx);
if (!(*sess)){
- assert(0);
GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("[DASH] Cannot try to download %s... OUT of memory ?\n", url));
return GF_OUT_OF_MEM;
}
return start_time;
}
-static Double gf_dash_get_segment_start_time(GF_DASH_Group *group, Double *segment_duration)
+static u64 gf_dash_get_segment_start_time_with_timescale(GF_DASH_Group *group, u64 *segment_duration, u32 *scale)
{
GF_MPD_Representation *rep;
GF_MPD_AdaptationSet *set;
GF_MPD_Period *period;
- Double start_time;
+ u64 start_time;
u32 timescale;
s32 segment_index;
u64 duration;
if (! timescale) timescale=1;
if (timeline) {
- start_time = (Double) gf_dash_segment_timeline_start(timeline, segment_index, &duration);
+ start_time = gf_dash_segment_timeline_start(timeline, segment_index, &duration);
} else {
- start_time = segment_index * (Double) duration;
- }
- start_time /= timescale;
- if (segment_duration) {
- *segment_duration = (Double) duration;
- *segment_duration /= timescale;
+ start_time = segment_index * duration;
}
+ if (segment_duration) *segment_duration = duration;
+ if (scale) *scale = timescale;
return start_time;
}
if (!timescale) timescale=1;
if (timeline) {
- start_time = (Double) gf_dash_segment_timeline_start(timeline, segment_index, &duration);
+ start_time = gf_dash_segment_timeline_start(timeline, segment_index, &duration);
} else {
- start_time = segment_index * (Double) duration;
+ start_time = segment_index * duration;
}
- start_time /= timescale;
+ if (segment_duration) *segment_duration = duration;
+ if (scale) *scale = timescale;
+ return start_time;
+}
+
+/*returns the start time (in seconds) of the group's current segment;
+ if segment_duration is not NULL, it receives the segment duration in seconds.
+ Thin wrapper over gf_dash_get_segment_start_time_with_timescale*/
+static Double gf_dash_get_segment_start_time(GF_DASH_Group *group, Double *segment_duration)
+{
+ u64 start, dur;
+ u32 scale;
+
+ start = gf_dash_get_segment_start_time_with_timescale(group, &dur, &scale);
 if (segment_duration) {
- *segment_duration = (Double) duration;
- *segment_duration /= timescale;
+ /*rescale duration from timescale units to seconds*/
+ *segment_duration = (Double) dur;
+ *segment_duration /= scale;
 }
-
- return start_time;
+ /*scale is never 0: the callee defaults a zero timescale to 1*/
+ return ((Double)start)/scale;
 }
u64 gf_dash_get_segment_availability_start_time(GF_MPD *mpd, GF_DASH_Group *group, u32 segment_index, u32 *seg_dur_ms)
}
}
-static GF_Err gf_dash_merge_segment_timeline(GF_MPD_SegmentList *old_list, GF_MPD_SegmentTemplate *old_template, GF_MPD_SegmentList *new_list, GF_MPD_SegmentTemplate *new_template, Double min_start_time)
+
+/*maps a segment start time (expressed in start_timescale units) to an entry
+ index in the given SegmentTimeline (whose entries use timescale units).
+ Returns the segment index, or the total entry count when the time falls at
+ the end of the timeline (regular case: the segment was the last of the
+ previous timeline and no new entries were added)*/
+static u32 gf_dash_get_index_in_timeline(GF_MPD_SegmentTimeline *timeline, u64 start, u64 start_timescale, u64 timescale)
+{
+ u64 start_time = 0;
+ u32 idx = 0;
+ u32 i, count, repeat;
+ count = gf_list_count(timeline->entries);
+ for (i=0; i<count; i++) {
+ GF_MPD_SegmentTimelineEntry *ent = gf_list_get(timeline->entries, i);
+
+ /*an explicit start time resets the running clock; otherwise keep accumulating durations*/
+ if (!i || ent->start_time) start_time = ent->start_time;
+
+ repeat = ent->repeat_count+1;
+ while (repeat) {
+ /*cross-multiply to compare the two times without a common-timescale conversion*/
+ if (start_timescale==timescale) {
+ if (start_time == start ) return idx;
+ } else {
+ if (start_time*start_timescale == start * timescale) return idx;
+ }
+ start_time+=ent->duration;
+ repeat--;
+ idx++;
+ }
+ }
+ //end of list in regular case: segment was the last one of the previous list and no changes happened
+ if (start_timescale==timescale) {
+ if (start_time == start ) return count;
+ } else {
+ if (start_time*start_timescale == start * timescale) return count;
+ }
+
+ GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("[DASH] Error: could not find previous segment start in current timeline ! seeking to end of timeline\n"));
+ return count;
+}
+
+
+static GF_Err gf_dash_merge_segment_timeline(GF_DASH_Group *group, GF_DashClient *dash, GF_MPD_SegmentList *old_list, GF_MPD_SegmentTemplate *old_template, GF_MPD_SegmentList *new_list, GF_MPD_SegmentTemplate *new_template, Double min_start_time)
{
GF_MPD_SegmentTimeline *old_timeline, *new_timeline;
- u32 idx;
+ u32 i, idx, prev_count, timescale;
u64 start_time;
GF_MPD_SegmentTimelineEntry *first_new_entry;
}
old_timeline = old_list->segment_timeline;
new_timeline = new_list->segment_timeline;
+ timescale = new_list->timescale;
} else if (old_template && old_template->segment_timeline) {
if (!new_template || !new_template->segment_timeline) {
GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("[DASH] Error - cannot update playlist: segment timeline not present in new MPD segmentTemplate\n"));
}
old_timeline = old_template->segment_timeline;
new_timeline = new_template->segment_timeline;
+ timescale = new_template->timescale;
}
if (!old_timeline && !new_timeline) return GF_OK;
+ prev_count = gf_list_count(old_timeline->entries);
+
+
+ if (group) {
+ group->current_start_time = gf_dash_get_segment_start_time_with_timescale(group, NULL, &group->current_timescale);
+ } else {
+ for (i=0; i<gf_list_count(dash->groups); i++) {
+ GF_DASH_Group *a_group = gf_list_get(dash->groups, i);
+ a_group->current_start_time = gf_dash_get_segment_start_time_with_timescale(a_group, NULL, &a_group->current_timescale);
+ }
+ }
+
+
first_new_entry = gf_list_get(new_timeline->entries, 0);
idx = 0;
start_time=0;
gf_list_insert(new_timeline->entries, ent, idx);
idx ++;
gf_list_rem(old_timeline->entries, 0);
+ }
+ if (group) {
+ group->nb_segments_in_rep = gf_list_count(new_timeline->entries);
+ group->download_segment_index = gf_dash_get_index_in_timeline(new_timeline, group->current_start_time, group->current_timescale, timescale);
+ } else {
+ u32 i;
+ for (i=0; i<gf_list_count(dash->groups); i++) {
+ GF_DASH_Group *a_group = gf_list_get(dash->groups, i);
+ a_group->nb_segments_in_rep = gf_list_count(new_timeline->entries);
+ a_group->download_segment_index = gf_dash_get_index_in_timeline(new_timeline, a_group->current_start_time, a_group->current_timescale, timescale);
+ }
}
+
+
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[DASH] Updated SegmentTimeline: %d entries (%d previously)\n", gf_list_count(new_timeline->entries), prev_count));
+#if 0
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("Dumping new merged timeline: \n"));
+ start_time=0;
+ for (idx=0; idx<gf_list_count(new_timeline->entries); idx++) {
+ GF_MPD_SegmentTimelineEntry *ent = gf_list_get(new_timeline->entries, idx);
+ if (!idx) start_time = ent->start_time;
+ assert(!ent->start_time || (ent->start_time >=start_time));
+ start_time += ent->duration*(1+ent->repeat_count);
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("\tt="LLU" d=%d r=%d\n", ent->start_time, ent->duration, ent->repeat_count));
+ }
+#endif
+
return GF_OK;
}
GF_MPD_SegmentList *segment_list;
/*update next download index*/
group->download_segment_index -= nb_removed;
+ assert(group->nb_segments_in_rep >= nb_removed);
+ group->nb_segments_in_rep -= nb_removed;
/*clean segmentList*/
segment_list = NULL;
if (group->period && group->period->segment_list) segment_list = group->period->segment_list;
char * purl;
Double timeline_start_time;
GF_MPD *new_mpd;
+ Bool fetch_only = 0;
if (!dash->mpd_dnload) {
local_url = purl = NULL;
gf_free(dash->base_url);
dash->base_url = gf_strdup(purl);
}
+ fetch_only = 1;
}
} else {
local_url = dash->dash_io->get_cache_name(dash->dash_io, dash->mpd_dnload);
/*TODO - check periods are the same !!*/
period = gf_list_get(dash->mpd->periods, dash->active_period_index);
+ if (fetch_only && !period) goto exit;
+
new_period = gf_list_get(new_mpd->periods, dash->active_period_index);
if (!new_period) {
GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("[DASH] Error - cannot update playlist: missing period\n"));
for (group_idx=0; group_idx<gf_list_count(dash->groups); group_idx++) {
GF_DASH_Group *group = gf_list_get(dash->groups, group_idx);
- Double group_start = gf_dash_get_segment_start_time(group, NULL);
- if (!group_idx || (timeline_start_time > group_start) ) timeline_start_time = group_start;
+ if (group->selection!=GF_DASH_GROUP_NOT_SELECTABLE) {
+ Double group_start = gf_dash_get_segment_start_time(group, NULL);
+ if (!group_idx || (timeline_start_time > group_start) ) timeline_start_time = group_start;
+ }
}
/*we can rewind our segments from timeshift*/
if (timeline_start_time > timeshift) timeline_start_time -= timeshift;
}
/*update segmentTimeline at Period level*/
- e = gf_dash_merge_segment_timeline(period->segment_list, period->segment_template, new_period->segment_list, new_period->segment_template, timeline_start_time);
+ e = gf_dash_merge_segment_timeline(NULL, dash, period->segment_list, period->segment_template, new_period->segment_list, new_period->segment_template, timeline_start_time);
if (e) {
gf_mpd_del(new_mpd);
return e;
GF_DASH_Group *group = gf_list_get(dash->groups, group_idx);
/*update info even if the group is not selected !*/
-
+ if (group->selection==GF_DASH_GROUP_NOT_SELECTABLE)
+ continue;
set = group->adaptation_set;
new_set = gf_list_get(new_period->adaptation_sets, group_idx);
}
}
- e = gf_dash_merge_segment_timeline(rep->segment_list, rep->segment_template, new_rep->segment_list, new_rep->segment_template, timeline_start_time);
+ e = gf_dash_merge_segment_timeline(group, NULL, rep->segment_list, rep->segment_template, new_rep->segment_list, new_rep->segment_template, timeline_start_time);
if (e) {
gf_mpd_del(new_mpd);
return e;
rep->mime_type = NULL;
}
}
+
+ /*update segmentTimeline at AdaptationSet level before switching the set (old setup needed to compute current timing of each group) */
+ e = gf_dash_merge_segment_timeline(group, NULL, set->segment_list, set->segment_template, new_set->segment_list, new_set->segment_template, timeline_start_time);
+ if (e) {
+ gf_mpd_del(new_mpd);
+ return e;
+ }
+
/*update group/period to new period*/
j = gf_list_find(group->period->adaptation_sets, group->adaptation_set);
group->adaptation_set = gf_list_get(new_period->adaptation_sets, j);
j = gf_list_count(group->adaptation_set->representations);
assert(j);
- /*update segmentTimeline at AdaptationSet level*/
- e = gf_dash_merge_segment_timeline(set->segment_list, set->segment_template, new_set->segment_list, new_set->segment_template, timeline_start_time);
- if (e) {
- gf_mpd_del(new_mpd);
- return e;
- }
-
/*now that all possible SegmentXXX have been updated, purge them if needed: all segments ending before timeline_start_time
will be removed from MPD*/
if (timeline_start_time) {
}
if (new_mpd->availabilityStartTime != dash->mpd->availabilityStartTime) {
- gf_dash_group_timeline_setup(new_mpd, group, fetch_time);
+ s64 diff = new_mpd->availabilityStartTime;
+ diff -= dash->mpd->availabilityStartTime;
+ if (diff < 0) diff = diff;
+ if (diff>3000)
+ gf_dash_group_timeline_setup(new_mpd, group, fetch_time);
}
group->maybe_end_of_stream = 0;
GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[DASH] Updated AdaptationSet %d - %d segments\n", group_idx+1, group->nb_segments_in_rep));
}
+
+exit:
/*swap representations - we don't need to update download_segment_index as it still points to the right entry in the merged list*/
if (dash->mpd)
gf_mpd_del(dash->mpd);
return e;
}
- /*no error and no init segment, go for media segment*/
+ /*no error and no init segment, go for media segment - this is needed for TS so that the set of media streams can be
+ declared to the player */
if (!base_init_url) {
- //if no init segment don't download first segment
-#if 1
- gf_mx_v(dash->dl_mutex);
- return GF_OK;
-#else
e = gf_dash_resolve_url(dash->mpd, rep, group, dash->base_url, GF_DASH_RESOLVE_URL_MEDIA, group->download_segment_index, &base_init_url, &start_range, &end_range, &group->current_downloaded_segment_duration, NULL);
if (e) {
gf_mx_v(dash->dl_mutex);
return e;
}
nb_segment_read = 1;
-#endif
} else if (!group->bitstream_switching) {
group->dont_delete_first_segment = 1;
}
/*do not erase local files*/
group->local_files = group->was_segment_base ? 0 : 1;
if (group->local_files) {
- gf_dash_buffer_off(group, dash);
+ gf_dash_buffer_off(group);
}
group->download_segment_index += nb_segment_read;
gf_dash_set_group_representation(group, rep);
}
+/*releases all transient download state of a group: active buffering state,
+ pending cache file deletion, the segment download session and every cached
+ segment. The group object itself is kept; timeline_setup is cleared so the
+ timeline gets recomputed on next use*/
+static void gf_dash_group_reset(GF_DashClient *dash, GF_DASH_Group *group)
+{
+ if (group->buffering) {
+ gf_dash_buffer_off(group);
+ }
+ if (group->urlToDeleteNext) {
+ /*only delete the cached file when we own it (not keeping files, not a local source)*/
+ if (!dash->keep_files && !group->local_files)
+ dash->dash_io->delete_cache_file(dash->dash_io, group->segment_download, group->urlToDeleteNext);
+
+ gf_free(group->urlToDeleteNext);
+ group->urlToDeleteNext = NULL;
+ }
+ if (group->segment_download) {
+ dash->dash_io->del(dash->dash_io, group->segment_download);
+ group->segment_download = NULL;
+ }
+ while (group->nb_cached_segments) {
+ group->nb_cached_segments --;
+ if (!dash->keep_files && !group->local_files)
+ gf_delete_file(group->cached[group->nb_cached_segments].cache);
+
+ gf_free(group->cached[group->nb_cached_segments].cache);
+ gf_free(group->cached[group->nb_cached_segments].url);
+ }
+ /*force timeline re-selection next time the group is used*/
+ group->timeline_setup = 0;
+}
+
static void gf_dash_reset_groups(GF_DashClient *dash)
{
/*send playback destroy event*/
GF_DASH_Group *group = gf_list_last(dash->groups);
gf_list_rem_last(dash->groups);
- if (group->urlToDeleteNext) {
- if (!dash->keep_files && !group->local_files)
- dash->dash_io->delete_cache_file(dash->dash_io, group->segment_download, group->urlToDeleteNext);
-
- gf_free(group->urlToDeleteNext);
- group->urlToDeleteNext = NULL;
- }
- if (group->segment_download) {
- dash->dash_io->del(dash->dash_io, group->segment_download);
- group->segment_download = NULL;
- }
- while (group->nb_cached_segments) {
- group->nb_cached_segments --;
- if (!dash->keep_files && !group->local_files)
- gf_delete_file(group->cached[group->nb_cached_segments].cache);
+ gf_dash_group_reset(dash, group);
- gf_free(group->cached[group->nb_cached_segments].cache);
- gf_free(group->cached[group->nb_cached_segments].url);
- }
gf_free(group->cached);
-
if (group->service_mime)
gf_free(group->service_mime);
gf_free(group);
continue;
}
}
+ if (rep->codecs && dash->max_bit_per_pixel) {
+ char *vid_type = strstr(rep->codecs, "hvc");
+ if (!vid_type) vid_type = strstr(rep->codecs, "hev");
+ if (!vid_type) vid_type = strstr(rep->codecs, "avc");
+ if (!vid_type) vid_type = strstr(rep->codecs, "svc");
+ if (!vid_type) vid_type = strstr(rep->codecs, "mvc");
+ //HEVC: codecs string is hvc1/hev1.[ABC]?<profile_idc>. - general_profile_idc 2 is Main 10
+ if (vid_type && (!strnicmp(rep->codecs, "hvc", 3) || !strnicmp(rep->codecs, "hev", 3))) {
+ char *pidc = rep->codecs+5;
+ if ((pidc[0]=='A') || (pidc[0]=='B') || (pidc[0]=='C')) pidc++;
+ //Main 10 !!
+ if (!strncmp(pidc, "2.", 2)) {
+ rep->playback.disabled = 1;
+ continue;
+ }
+ }
+ //AVC: profile_idc in an avc1.PPCCLL codecs string is hex-coded (RFC 6381), so parse base 16
+ if (vid_type && (!strnicmp(rep->codecs, "avc", 3) || !strnicmp(rep->codecs, "svc", 3) || !strnicmp(rep->codecs, "mvc", 3))) {
+ char prof_string[3];
+ u8 prof;
+ strncpy(prof_string, vid_type+5, 2);
+ prof_string[2]=0;
+ prof = (u8) strtoul(prof_string, NULL, 16);
+ //High 10 profile (profile_idc 110 = 0x6E) - atoi would read "6E" as 6 and never match
+ if (prof==0x6E) {
+ rep->playback.disabled = 1;
+ continue;
+ }
+ }
+ }
rep->playback.disabled = 0;
if (rep->width>set->max_width) {
u32 nb_rep_ok = 0;
GF_DASH_Group *group = gf_list_get(dash->groups, group_i);
-#ifdef DEBUG_SKIP_FIRST_SET
- if (group_i==0)
+ if ((dash->debug_group_index>=0) && (group_i != (u32) dash->debug_group_index)) {
+ group->selection = GF_DASH_GROUP_NOT_SELECTABLE;
continue;
-#endif
+ }
nb_rep = gf_list_count(group->adaptation_set->representations);
group->selection = GF_DASH_GROUP_NOT_SELECTED;
nb_groups_ok++;
-
-#ifdef DEBUG_FIRST_SET_ONLY
- break;
-#endif
}
period = gf_list_get(dash->mpd->periods, dash->active_period_index);
for (k=0; k<gf_list_count(group->adaptation_set->representations); k++) {
GF_MPD_Representation *arep = gf_list_get(group->adaptation_set->representations, k);
+ if (arep->playback.disabled) continue;
+
if (dl_rate >= arep->bandwidth) {
if (!new_rep) new_rep = arep;
else if (go_up) {
group_count = gf_list_count(dash->groups);
for (i=0; i<group_count; i++) {
GF_DASH_Group *group = gf_list_get(dash->groups, i);
- if (group->selection != GF_DASH_GROUP_SELECTED) continue;
+ if (group->selection==GF_DASH_GROUP_NOT_SELECTABLE)
+ continue;
+
+ //by default all groups are started (init seg download and buffering). They will be (de)selected by the user
if (first_period_in_mpd) {
- gf_dash_buffer_on(group, dash);
+ gf_dash_buffer_on(group);
}
e = gf_dash_download_init_segment(dash, group);
if (e) break;
if (dash->force_mpd_update || (dash->mpd->minimum_update_period && (timer > dash->mpd->minimum_update_period))) {
u32 diff = gf_sys_clock();
dash->force_mpd_update = 0;
- GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[DASH] At %d Time to update the playlist (%u ms elapsed since last refresh and min reoad rate is %u)\n", gf_sys_clock() , timer, dash->mpd->minimum_update_period));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[DASH] At %d Time to update the playlist (%u ms elapsed since last refresh and min reload rate is %u)\n", gf_sys_clock() , timer, dash->mpd->minimum_update_period));
e = gf_dash_update_manifest(dash);
group_count = gf_list_count(dash->groups);
diff = gf_sys_clock() - diff;
Bool use_byterange;
u32 representation_index;
u32 clock_time;
+ Bool empty_file = GF_FALSE;
GF_DASH_Group *group = gf_list_get(dash->groups, i);
- if (group->selection != GF_DASH_GROUP_SELECTED) continue;
+
+ if (group->selection != GF_DASH_GROUP_SELECTED) {
+ if (group->nb_cached_segments) {
+ gf_dash_group_reset(dash, group);
+ }
+ continue;
+ }
if (group->done) continue;
if (group->nb_cached_segments>=group->max_cached_segments) {
/*if segment AST is greater than now, it is not yet available - we would need an estimate on how long the request takes to be sent to the server in order to be more reactive ...*/
if (to_wait > 1) {
- GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[DASH] Set #%d At %d Next segment %d (AST "LLD") is not yet available on server - requesting later in %d ms\n", i+1, gf_sys_clock(), group->download_segment_index + start_number, segment_ast, to_wait));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[DASH] Set #%d At %d Next segment %d (AST "LLD" - sec in period %g) is not yet available on server - requesting later in %d ms\n", i+1, gf_sys_clock(), group->download_segment_index + start_number, segment_ast, (segment_ast - group->period->start - group->ast_at_init)/1000.0, to_wait));
if (group->last_segment_time) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[DASH] %d ms elapsed since previous segment download\n", clock_time - group->last_segment_time));
}
min_wait = to_wait;
continue;
} else {
- GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[DASH] Set #%d At %d Next segment %d (AST "LLD") should now be available on server since %d ms - requesting it\n", i+1, gf_sys_clock(), group->download_segment_index + start_number, segment_ast, -to_wait));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[DASH] Set #%d At %d Next segment %d (AST "LLD" - sec in period %g) should now be available on server since %d ms - requesting it\n", i+1, gf_sys_clock(), group->download_segment_index + start_number, segment_ast, (segment_ast - group->period->start - group->ast_at_init)/1000.0, -to_wait));
if (group->last_segment_time) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[DASH] %d ms elapsed since previous segment download\n", clock_time - group->last_segment_time));
e = GF_OK;
/*do not erase local files*/
group->local_files = 1;
- gf_dash_buffer_off(group, dash);
+ gf_dash_buffer_off(group);
if (group->force_switch_bandwidth && !dash->auto_switch_count) {
gf_dash_switch_group_representation(dash, group);
/*restart*/
if (group->segment_must_be_streamed) local_file_name = dash->dash_io->get_url(dash->dash_io, group->segment_download);
else local_file_name = dash->dash_io->get_cache_name(dash->dash_io, group->segment_download);
-
+
+ if (dash->dash_io->get_total_size(dash->dash_io, group->segment_download)==0) {
+ empty_file = GF_TRUE;
+ }
resource_name = dash->dash_io->get_url(dash->dash_io, group->segment_download);
if (!dash->auto_switch_count)
gf_mx_p(dash->dl_mutex);
assert(group->nb_cached_segments<group->max_cached_segments);
assert( local_file_name );
- group->cached[group->nb_cached_segments].cache = gf_strdup(local_file_name);
- group->cached[group->nb_cached_segments].url = gf_strdup( resource_name );
- group->cached[group->nb_cached_segments].start_range = 0;
- group->cached[group->nb_cached_segments].end_range = 0;
- group->cached[group->nb_cached_segments].representation_index = representation_index;
- group->cached[group->nb_cached_segments].duration = (u32) group->current_downloaded_segment_duration;
- group->cached[group->nb_cached_segments].loop_detected = group->loop_detected;
- group->loop_detected = GF_FALSE;
-
- if (group->local_files && use_byterange) {
- group->cached[group->nb_cached_segments].start_range = start_range;
- group->cached[group->nb_cached_segments].end_range = end_range;
+ if (! empty_file) {
+
+ group->cached[group->nb_cached_segments].cache = gf_strdup(local_file_name);
+ group->cached[group->nb_cached_segments].url = gf_strdup( resource_name );
+ group->cached[group->nb_cached_segments].start_range = 0;
+ group->cached[group->nb_cached_segments].end_range = 0;
+ group->cached[group->nb_cached_segments].representation_index = representation_index;
+ group->cached[group->nb_cached_segments].duration = (u32) group->current_downloaded_segment_duration;
+ group->cached[group->nb_cached_segments].loop_detected = group->loop_detected;
+ group->loop_detected = GF_FALSE;
+
+ if (group->local_files && use_byterange) {
+ group->cached[group->nb_cached_segments].start_range = start_range;
+ group->cached[group->nb_cached_segments].end_range = end_range;
+ }
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[DASH] Added file to cache (%u/%u in cache): %s\n", group->nb_cached_segments+1, group->max_cached_segments, group->cached[group->nb_cached_segments].url));
+ group->nb_cached_segments++;
+ gf_dash_update_buffering(group, dash);
}
- GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[DASH] Added file to cache (%u/%u in cache): %s\n", group->nb_cached_segments+1, group->max_cached_segments, group->cached[group->nb_cached_segments].url));
- group->nb_cached_segments++;
- gf_dash_update_buffering(group, dash);
+
/* download enhancement representation of this segment*/
if ((representation_index != group->force_max_rep_index) && rep->enhancement_rep_index_plus_one)
group->active_rep_index = rep->enhancement_rep_index_plus_one - 1;
return (group->selection == GF_DASH_GROUP_SELECTED) ? 1 : 0;
}
+GF_EXPORT
+/*returns 1 if the group at the given index exists and may be (de)selected by
+ the user, 0 otherwise*/
+Bool gf_dash_is_group_selectable(GF_DashClient *dash, u32 idx)
+{
+ GF_DASH_Group *group = gf_list_get(dash->groups, idx);
+ if (!group) return 0;
+ if (group->selection == GF_DASH_GROUP_NOT_SELECTABLE) return 0;
+ return 1;
+}
+
GF_EXPORT
void gf_dash_get_info(GF_DashClient *dash, const char **title, const char **source)
{
GF_EXPORT
void gf_dash_group_select(GF_DashClient *dash, u32 idx, Bool select)
{
+ Bool needs_resetup = 0;
GF_DASH_Group *group = gf_list_get(dash->groups, idx);
if (!group) return;
if (group->selection == GF_DASH_GROUP_NOT_SELECTABLE)
return;
+ if ((group->selection==GF_DASH_GROUP_NOT_SELECTED) && select) needs_resetup = 1;
+
group->selection = select ? GF_DASH_GROUP_SELECTED : GF_DASH_GROUP_NOT_SELECTED;
/*this set is part of a group, make sure no all other sets from the indicated group are unselected*/
if (select && (group->adaptation_set->group>=0)) {
}
}
}
+ //TODO: recompute group download index based on current playback ...
+ if (needs_resetup) {
+
+ }
}
GF_EXPORT
}
 GF_EXPORT
-GF_Err gf_dash_set_max_resolution(GF_DashClient *dash, u32 width, u32 height)
+/*declares the display capabilities used to filter representations:
+ maximum width/height in pixels and maximum color depth in bits per pixel
+ (used to disable e.g. 10-bit profiles on 8-bit displays).
+ Returns GF_BAD_PARAM if dash is NULL*/
+GF_Err gf_dash_set_max_resolution(GF_DashClient *dash, u32 width, u32 height, u8 max_display_bpp)
 {
 if (dash) {
 dash->max_width = width;
 dash->max_height = height;
+ dash->max_bit_per_pixel = max_display_bpp;
 return GF_OK;
 }
 return GF_BAD_PARAM;
 }
+GF_EXPORT
+/*debug helper: restricts processing to the single group at group_index;
+ a negative index disables the restriction*/
+void gf_dash_debug_group(GF_DashClient *dash, s32 group_index)
+{
+ /*guard against NULL client, consistent with gf_dash_set_user_buffer*/
+ if (dash) dash->debug_group_index = group_index;
+}
+
+GF_EXPORT
+/*sets the amount of media buffered on the user side, in milliseconds;
+ used when positioning the initial live point (see timeline setup)*/
+void gf_dash_set_user_buffer(GF_DashClient *dash, u32 buffer_time_ms)
+{
+ if (dash) dash->user_buffer_ms = buffer_time_ms;
+}
+
+
#endif //GPAC_DISABLE_DASH_CLIENT
/*for audio*/
u32 sample_rate, channels;
- /*for anything*/
+ /*apply to any media. We use 5 bytes because we may use copy data converted from gf_4cc_to_str which is 5 bytes*/
char szLang[5];
};
u32 initial_moof_sn;
u64 initial_tfdt;
Bool no_fragments_defaults;
+ Bool samplegroups_in_traf;
} GF_DASHSegmenterOptions;
struct _dash_segment_input
u64 MaxFragmentDuration, MaxSegmentDuration, SegmentDuration, maxFragDurationOverSegment;
u32 presentationTimeOffset = 0;
Double segment_start_time, file_duration, period_duration, max_segment_duration;
- u32 nb_segments, width, height, sample_rate, nb_channels, sar_w, sar_h, fps_num, fps_denum, startNumber, startNumberRewind;
+ u32 nb_segments, width, height, sample_rate, nb_channels, sar_w, sar_h, fps_num, fps_denum, startNumber;
char langCode[5];
u32 index_start_range, index_end_range;
Bool force_switch_segment = GF_FALSE;
file_duration = 0;
startNumber = 1;
- startNumberRewind = 0;
//create output file
/*need to precompute bandwidth*/
store_dash_params=GF_TRUE;
gf_cfg_set_key(dash_cfg->dash_ctx, RepSecName, "ID", dash_input->representationID);
}
- //we no longer support start number changes
-#if 0
- /*we are in time shift enabled mode so segments will get destroyed, set the start number to the current segment
- and restore presentationTimeOffset (cf below)*/
- if (!store_dash_params && (dash_cfg->time_shift_depth >= 0)) {
- opt = gf_cfg_get_key(dash_cfg->dash_ctx, RepSecName, "NextSegmentIndex");
- sscanf(opt, "%u", &startNumber);
-
- /*adjust the startNumber according to the timeShiftBuffer depth*/
- if ((dash_cfg->time_shift_depth>0) && (startNumber>(u32)dash_cfg->time_shift_depth) ) {
- startNumberRewind = dash_cfg->time_shift_depth;
- }
- }
-#endif
}
opt = dash_cfg->dash_ctx ? gf_cfg_get_key(dash_cfg->dash_ctx, RepSecName, "InitializationSegment") : NULL;
if (gf_isom_is_track_in_root_od(input, i+1)) gf_isom_add_track_to_root_od(output, TrackNum);
+ /*remove sgpd in stbl; it would be in traf*/
+ if (dash_cfg->samplegroups_in_traf) {
+ GF_TrackBox *trak = (GF_TrackBox *)gf_isom_get_track_from_file(output, TrackNum);
+ if (!trak) continue;
+ while (gf_list_count(trak->Media->information->sampleTable->sampleGroupsDescription)) {
+ GF_Box* box = (GF_Box*)gf_list_get(trak->Media->information->sampleTable->sampleGroupsDescription, 0);
+ gf_isom_box_del(box);
+ gf_list_rem(trak->Media->information->sampleTable->sampleGroupsDescription, 0);
+ }
+ }
+
// Commenting it the code for Timed Text tracks, it may happen that we have only one long sample, fragmentation is useful
#if 0
//if only one sample, don't fragment track
if (e) goto err_exit;
/*copy subsample information*/
- e = gf_isom_fragment_copy_subsample(output, tf->TrackID, input, tf->OriginalTrack, tf->SampleNum + 1);
+ e = gf_isom_fragment_copy_subsample(output, tf->TrackID, input, tf->OriginalTrack, tf->SampleNum + 1, dash_cfg->samplegroups_in_traf);
if (e)
goto err_exit;
max_segment_duration = (Double) (s64) SegmentDuration;
max_segment_duration /= dash_cfg->dash_scale;
}
- force_switch_segment=GF_FALSE;
- switch_segment=GF_TRUE;
- SegmentDuration=GF_FALSE;
+ force_switch_segment = GF_FALSE;
+ switch_segment = GF_TRUE;
+ SegmentDuration = 0;
split_at_rap = GF_FALSE;
has_rap = GF_FALSE;
/*restore fragment duration*/
if (!dash_cfg->variable_seg_rad_name && first_in_set) {
const char *rad_name = gf_url_get_resource_name(seg_rad_name);
gf_media_mpd_format_segment_name(GF_DASH_TEMPLATE_TEMPLATE, is_bs_switching, SegmentName, output_file, dash_input->representationID, rad_name, !stricmp(seg_ext, "null") ? NULL : seg_ext, 0, 0, 0, dash_cfg->use_segment_timeline);
- fprintf(dash_cfg->mpd, " <SegmentTemplate timescale=\"%d\" media=\"%s\" startNumber=\"%d\"", mpd_timeline_bs ? dash_cfg->dash_scale : mpd_timescale, SegmentName, startNumber - startNumberRewind);
+ fprintf(dash_cfg->mpd, " <SegmentTemplate timescale=\"%d\" media=\"%s\" startNumber=\"%d\"", mpd_timeline_bs ? dash_cfg->dash_scale : mpd_timescale, SegmentName, startNumber);
if (!mpd_timeline_bs) {
+ if (!max_segment_duration)
+ max_segment_duration = dash_cfg->segment_duration;
fprintf(dash_cfg->mpd, " duration=\"%d\"", (u32) (max_segment_duration * mpd_timescale));
}
/*in BS switching we share the same IS for all reps*/
if (dash_cfg->variable_seg_rad_name) {
const char *rad_name = gf_url_get_resource_name(seg_rad_name);
gf_media_mpd_format_segment_name(GF_DASH_TEMPLATE_TEMPLATE, is_bs_switching, SegmentName, output_file, dash_input->representationID, rad_name, !stricmp(seg_ext, "null") ? NULL : seg_ext, 0, bandwidth, 0, dash_cfg->use_segment_timeline);
- fprintf(dash_cfg->mpd, " <SegmentTemplate timescale=\"%d\" duration=\"%d\" media=\"%s\" startNumber=\"%d\"", mpd_timescale, (u32) (max_segment_duration * mpd_timescale), SegmentName, startNumber - startNumberRewind);
+ fprintf(dash_cfg->mpd, " <SegmentTemplate timescale=\"%d\" duration=\"%d\" media=\"%s\" startNumber=\"%d\"", mpd_timescale, (u32) (max_segment_duration * mpd_timescale), SegmentName, startNumber);
if (!is_bs_switching) {
gf_media_mpd_format_segment_name(GF_DASH_TEMPLATE_INITIALIZATION_TEMPLATE, is_bs_switching, SegmentName, output_file, dash_input->representationID, rad_name, !stricmp(seg_ext, "null") ? NULL : "mp4", 0, 0, 0, dash_cfg->use_segment_timeline);
fprintf(dash_cfg->mpd, " initialization=\"%s\"", SegmentName);
char szSectionName[100], szRepURLsSecName[100];
char szCodecs[100];
const char *opt;
- u32 i, startNumberRewind;
+ u32 i;
GF_Err e;
u64 start, pcr_shift, next_pcr_shift;
Double cumulated_duration = 0;
/*create bitstreams*/
segment_index = 1;
- startNumberRewind = 0;
ts_seg.index_file = NULL;
ts_seg.index_bs = NULL;
if (!dash_cfg->dash_ctx && (dash_cfg->use_url_template != 2)) {
opt = gf_cfg_get_key(dash_cfg->dash_ctx, szSectionName, "StartIndex");
if (opt) sscanf(opt, "%u", &segment_index);
- /*adjust the startNumber according to the timeShiftBuffer depth*/
- if ((dash_cfg->time_shift_depth>0) && (segment_index > (u32)dash_cfg->time_shift_depth) ) {
- startNumberRewind = dash_cfg->time_shift_depth;
- }
-
-
opt = gf_cfg_get_key(dash_cfg->dash_ctx, szSectionName, "PCR90kOffset");
if (opt) sscanf(opt, LLU, &pcr_shift);
/*write segment template for all representations*/
if (first_in_set && dash_cfg->seg_rad_name && dash_cfg->use_url_template && !dash_cfg->variable_seg_rad_name) {
gf_media_mpd_format_segment_name(GF_DASH_TEMPLATE_TEMPLATE, 1, SegName, basename, dash_input->representationID, gf_url_get_resource_name(dash_cfg->seg_rad_name), "ts", 0, bandwidth, segment_index, dash_cfg->use_segment_timeline);
- fprintf(dash_cfg->mpd, " <SegmentTemplate timescale=\"90000\" duration=\"%d\" startNumber=\"%d\" media=\"%s\"", (u32) (90000*dash_cfg->segment_duration), segment_index - startNumberRewind, SegName);
+ fprintf(dash_cfg->mpd, " <SegmentTemplate timescale=\"90000\" duration=\"%d\" startNumber=\"%d\" media=\"%s\"", (u32) (90000*dash_cfg->segment_duration), segment_index, SegName);
if (!dash_cfg->dash_ctx) {
gf_media_mpd_format_segment_name(GF_DASH_TEMPLATE_INITIALIZATION_TEMPLATE, 1, IdxName, basename, dash_input->representationID, gf_url_get_resource_name(dash_cfg->seg_rad_name), "six", 0, bandwidth, segment_index, dash_cfg->use_segment_timeline);
fprintf(dash_cfg->mpd, " index=\"%s\"", IdxName);
/*TODO what should we put for minBufferTime */
fprintf(mpd, "<MPD xmlns=\"urn:mpeg:dash:schema:mpd:2011\" minBufferTime=\"PT%fS\" type=\"%s\"", min_buffer, dash_dynamic ? "dynamic" : "static");
if (dash_dynamic) {
- /*otherwise timeshift is infinite, use original availability start time*/
- if ((s32)time_shift_depth<0) {
- const char *opt = gf_cfg_get_key(dash_ctx, "DASH", "GenerationNTP");
- sscanf(opt, "%u", &sec);
- sec += ast_shift_sec;
- }
+ //we only support profiles for which AST has to be the same
+ const char *opt = gf_cfg_get_key(dash_ctx, "DASH", "GenerationNTP");
+ sscanf(opt, "%u", &sec);
+ sec += ast_shift_sec;
+
#ifdef _WIN32_WCE
*(LONGLONG *) &filet = (sec - GF_NTP_SEC_1900_TO_1970) * 10000000 + TIMESPEC_TO_FILETIME_OFFSET;
FileTimeToSystemTime(&filet, &syst);
gf_cfg_set_key(dash_ctx, "DASH", "SessionType", (*dynamic==2) ? "dynamic-debug" : ( *dynamic ? "dynamic" : "static" ) );
gf_cfg_set_key(dash_ctx, "DASH", "TimeShiftBufferDepth", szVal);
gf_cfg_set_key(dash_ctx, "DASH", "StoreParams", "yes");
+ }
+ //switching from live to static
+ else if (! (*dynamic) && !strncmp(opt, "dynamic", 7)) {
+ gf_cfg_set_key(dash_ctx, "DASH", "SessionType", "static");
} else {
*dynamic = 0;
if (!strcmp(opt, "dynamic")) *dynamic = 1;
else if (!strcmp(opt, "dynamic-debug")) *dynamic = 2;
+
opt = gf_cfg_get_key(dash_ctx, "DASH", "TimeShiftBufferDepth");
*timeShiftBufferDepth = atoi(opt);
gf_cfg_set_key(dash_ctx, "DASH", "StoreParams", "no");
Bool seg_at_rap, Double dash_duration, char *seg_name, char *seg_ext, u32 segment_marker_4cc,
Double frag_duration, s32 subsegs_per_sidx, Bool daisy_chain_sidx, Bool frag_at_rap, const char *tmpdir,
GF_Config *dash_ctx, u32 dash_dynamic, u32 mpd_update_time, u32 time_shift_depth, Double subduration, Double min_buffer,
- u32 ast_shift_sec, u32 dash_scale, Bool fragments_in_memory, u32 initial_moof_sn, u64 initial_tfdt, Bool no_fragments_defaults, Bool pssh_moof)
+ u32 ast_shift_sec, u32 dash_scale, Bool fragments_in_memory, u32 initial_moof_sn, u64 initial_tfdt, Bool no_fragments_defaults, Bool pssh_moof, Bool samplegroups_in_traf)
{
u32 i, j, segment_mode;
char *sep, szSegName[GF_MAX_PATH], szSolvedSegName[GF_MAX_PATH], szTempMPD[GF_MAX_PATH], szOpt[GF_MAX_PATH];
/*init dash context if needed*/
if (dash_ctx) {
-
e = gf_dasher_init_context(dash_ctx, &dash_dynamic, &time_shift_depth, NULL, ast_shift_sec);
if (e) return e;
if (dash_ctx) {
memset(dash_inputs, 0, sizeof(GF_DashSegInput)*nb_dash_inputs);
j = 0;
for (i=0; i<nb_inputs; i++) {
- u32 nb_diff;
+ s32 nb_diff;
dash_inputs[j].file_name = inputs[i].file_name;
strcpy(dash_inputs[j].representationID, inputs[i].representationID);
strcpy(dash_inputs[j].periodID, inputs[i].periodID);
if (!strlen(dash_inputs[j].periodID)) {
max_period = 1;
dash_inputs[j].period = 1;
- if (dash_dynamic) {
+ //assign ID if dynamic - if dash_ctx also assign ID since we could have moved from dynamic to static
+ if (dash_dynamic || dash_ctx) {
strcpy(dash_inputs[j].periodID, "GENID_DEF");
}
}
nb_diff = nb_dash_inputs;
dash_inputs[j].moof_seqnum_increase = 2 + (u32) (dash_duration/frag_duration);
e = gf_dash_segmenter_probe_input(&dash_inputs, &nb_dash_inputs, j);
-
+ nb_diff = nb_dash_inputs - nb_diff;
if (e) {
GF_LOG(GF_LOG_WARNING, GF_LOG_DASH, ("[DASH]: Cannot open file %s for dashing: %s\n", dash_inputs[i].file_name, gf_error_to_string(e) ));
+ nb_diff--;
}
if (!strcmp(dash_inputs[j].szMime, "video/mp2t")) has_mpeg2 = 1;
- nb_diff = nb_dash_inputs - nb_diff;
j += 1+nb_diff;
}
+ if (!j) {
+ GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("[DASH] Error: no suitable file found for dashing.\n"));
+ e = GF_BAD_PARAM;
+ goto exit;
+ }
+
memset(&dash_opts, 0, sizeof(GF_DASHSegmenterOptions));
/*set all default roles to main if needed*/
dash_opts.inband_param_set = ((bitstream_switching == GF_DASH_BSMODE_INBAND) || (bitstream_switching == GF_DASH_BSMODE_SINGLE) ) ? 1 : 0;
dash_opts.memory_mode = fragments_in_memory;
dash_opts.pssh_moof = pssh_moof;
+ dash_opts.samplegroups_in_traf = samplegroups_in_traf;
dash_opts.segment_duration = dash_duration * 1000 / dash_scale;
dash_opts.subduration = subduration * 1000 / dash_scale;
fps_num = dash_inputs[i].components[j].fps_num;
fps_denum = dash_inputs[i].components[j].fps_denum;
}
+
+ /* FIX: the original condition compared the component language with itself
+  * (strncmp(x, x, 3) is always 0), so this branch was dead and the adaptation
+  * set language was never checked nor assigned; compare the component
+  * language against the adaptation-set language szLang instead */
+ if (dash_inputs[i].components[j].szLang && dash_inputs[i].components[j].szLang[0] && strncmp(dash_inputs[i].components[j].szLang, szLang, 3) ) {
+ if (szLang[0]) {
+ GF_LOG(GF_LOG_WARNING, GF_LOG_DASH, ("[DASH] two languages in adaptation set: %s will be kept %s will be ignored\n", szLang, dash_inputs[i].components[j].szLang));
+ } else {
+ /* copy 3-char code plus terminating NUL */
+ memcpy(szLang, dash_inputs[i].components[j].szLang, 4*sizeof(char));
+ }
+ }
}
}
exit:
if (mpd) {
fclose(mpd);
- if (!e && dash_dynamic)
- gf_move_file(szTempMPD, mpdfile);
+ if (!e && dash_dynamic) {
+ gf_delete_file(mpdfile);
+ e = gf_move_file(szTempMPD, mpdfile);
+ if (e) {
+ GF_LOG(GF_LOG_ERROR, GF_LOG_AUTHOR, ("[DASH] Error moving file %s to %s: %s\n", szTempMPD, mpdfile, gf_error_to_string(e) ));
+ }
+ }
}
gf_free(dash_inputs);
return e;
char *dsi = NULL;
u32 w, h;
u32 dsi_len = 0;
- GF_DecoderConfig *dcd = gf_isom_get_decoder_config(streamer->isom, track->track_num, 1);
+ GF_DecoderConfig *dcd;
+ //use inspect mode so that we don't aggregate xPS from the base in the enhancement ESD
+ gf_isom_set_nalu_extract_mode(streamer->isom, track->track_num, GF_ISOM_NALU_EXTRACT_INSPECT);
+ dcd = gf_isom_get_decoder_config(streamer->isom, track->track_num, 1);
if (dcd && dcd->decoderSpecificInfo) {
dsi = dcd->decoderSpecificInfo->data;
case GF_ISOM_SUBTYPE_HEV1:
case GF_ISOM_SUBTYPE_HVC2:
case GF_ISOM_SUBTYPE_HEV2:
+ case GF_ISOM_SUBTYPE_SHC1:
{
- GF_HEVCConfig *hevcc = NULL;
+ GF_HEVCConfig *hevcc = NULL, *shvcc = NULL;
hevcc = gf_isom_hevc_config_get(streamer->isom, track->track_num, 1);
if (hevcc) {
track->avc_nalu_size = hevcc->nal_unit_size;
streamType = GF_STREAM_VISUAL;
oti = GPAC_OTI_VIDEO_HEVC;
}
+ shvcc = gf_isom_shvc_config_get(streamer->isom, track->track_num, 1);
+ if (shvcc) {
+ track->avc_nalu_size = shvcc->nal_unit_size;
+ gf_odf_hevc_cfg_del(shvcc);
+ streamType = GF_STREAM_VISUAL;
+ oti = GPAC_OTI_VIDEO_SHVC;
+ }
flags |= GP_RTP_PCK_USE_MULTI;
break;
}
#include <gpac/html5_media.h>
#include <gpac/html5_mse.h>
+/* Allocates a new, empty HTML5 TimeRanges object whose start/end values are
+ * expressed in the given timescale (units per second). */
+GF_HTML_MediaTimeRanges *gf_html_timeranges_new(u32 timescale)
+{
+ GF_HTML_MediaTimeRanges *ranges;
+ GF_SAFEALLOC(ranges, GF_HTML_MediaTimeRanges);
+ /* NOTE(review): GF_SAFEALLOC may leave ranges NULL on allocation failure;
+  * the dereferences below would then crash - confirm project policy */
+ ranges->times = gf_list_new();
+ ranges->timescale = timescale;
+ return ranges;
+}
-GF_Err gf_media_time_ranges_add(GF_HTML_MediaTimeRanges *timeranges, double start, double end)
+/* Appends one boundary value (already expressed in the ranges' timescale) to
+ * the flat start/end list. NOTE(review): gf_malloc result is dereferenced
+ * without a NULL check - confirm allocation policy */
+static GF_Err gf_html_timeranges_add_time(GF_HTML_MediaTimeRanges *timeranges, u64 time)
{
- double *d;
+ u64 *t;
if (!timeranges) return GF_BAD_PARAM;
- d = (double *)gf_malloc(sizeof(double));
- *d = start;
- gf_list_add(timeranges->times, d);
- d = (double *)gf_malloc(sizeof(double));
- *d = end;
- gf_list_add(timeranges->times, d);
+ t = (u64 *)gf_malloc(sizeof(u64));
+ *t = time;
+ gf_list_add(timeranges->times, t);
return GF_OK;
}
-void gf_html_timeranges_reset(GF_HTML_MediaTimeRanges *range)
+/* Appends a range start boundary (caller must later add the matching end) */
+GF_Err gf_html_timeranges_add_start(GF_HTML_MediaTimeRanges *timeranges, u64 start)
+{
+ return gf_html_timeranges_add_time(timeranges, start);
+}
+
+/* Appends a range end boundary, closing the range opened by the last start */
+GF_Err gf_html_timeranges_add_end(GF_HTML_MediaTimeRanges *timeranges, u64 end)
{
- while (gf_list_count(range->times))
+ return gf_html_timeranges_add_time(timeranges, end);
+}
+
+
+/* Frees every stored boundary value and empties the list; the ranges object
+ * itself and its (now empty) list remain valid for reuse */
+void gf_html_timeranges_reset(GF_HTML_MediaTimeRanges *ranges)
+{
+ while (gf_list_count(ranges->times))
{
- double *d = (double *)gf_list_get(range->times, 0);
+ u64 *d = (u64 *)gf_list_get(ranges->times, 0);
gf_free(d);
- gf_list_rem(range->times, 0);
+ gf_list_rem(ranges->times, 0);
}
}
-void gf_html_timeranges_del(GF_HTML_MediaTimeRanges *range)
+/* Destroys a TimeRanges object: frees all boundary values, the list, and the
+ * object itself. Do not use ranges after this call. */
+void gf_html_timeranges_del(GF_HTML_MediaTimeRanges *ranges)
+{
+ gf_html_timeranges_reset(ranges);
+ gf_list_del(ranges->times);
+ ranges->times = NULL;
+ gf_free(ranges);
+}
+
+/* Coalesces adjacent [start,end] pairs of ranges->times in place.
+ * Assumes the list is an even-sized, sorted sequence of start/end pointers
+ * with non-overlapping ranges (TODO confirm with callers; overlapping
+ * entries would leak their boundary values). */
+void gf_html_timeranges_merge(GF_HTML_MediaTimeRanges *ranges) {
+ u32 i, count;
+ u64 *start;
+ u64 *end;
+ u64 *prev_end;
+ GF_List *merged = gf_list_new();
+
+ prev_end = NULL;
+ count = gf_list_count(ranges->times);
+ for (i = 0; i < count; i+=2) {
+ start = (u64 *)gf_list_get(ranges->times, i);
+ end = (u64 *)gf_list_get(ranges->times, i+1);
+ if (prev_end == NULL || *start > *prev_end) {
+ if (prev_end) {
+ gf_list_add(merged, prev_end);
+ }
+ gf_list_add(merged, start);
+ } else if (*start == *prev_end) {
+ /* contiguous ranges: drop the redundant boundary pair.
+  * FIX: the previous end pointer was neither kept nor freed (leak) */
+ gf_free(start);
+ gf_free(prev_end);
+ }
+ prev_end = end;
+ }
+ if (prev_end) {
+ /* FIX: the closing end must go into the merged list; it was added to
+  * ranges->times, which is deleted just below, losing the last range's
+  * end and leaking the value */
+ gf_list_add(merged, prev_end);
+ }
+ gf_list_del(ranges->times);
+ ranges->times = merged;
+}
+
+
+/* Returns a newly allocated TimeRanges holding the union of a and b,
+ * expressed in a's timescale. Boundary value pointers are MOVED (aliased)
+ * from a->times / b->times into the result; callers must not free them
+ * twice. a must be non-NULL (dereferenced below); b may be NULL. */
+GF_HTML_MediaTimeRanges *gf_html_timeranges_union(GF_HTML_MediaTimeRanges *a, GF_HTML_MediaTimeRanges *b)
+{
+ GF_HTML_MediaTimeRanges *union_ranges;
+ u32 i, j, count_a, count_b;
+ union_ranges = gf_html_timeranges_new(a->timescale);
+ union_ranges->c = a->c;
+ union_ranges->_this = a->_this;
+
+ count_a = gf_list_count(a->times);
+ if (b) {
+ count_b = gf_list_count(b->times);
+ } else {
+ count_b = 0;
+ }
+ if (count_a == 0 && count_b == 0) {
+ /* NOTE(review): union_ranges allocated above is leaked on this path */
+ return NULL;
+ } else if (count_a == 0) {
+ /* NOTE(review): after this swap the values iterated come from the old b,
+  * but union_ranges already carries the old a's timescale/c/_this -
+  * confirm behavior when the two timescales differ */
+ GF_HTML_MediaTimeRanges *tmp = a;
+ a = b;
+ b = tmp;
+ count_a = count_b;
+ count_b = 0;
+ }
+ i = 0;
+ j = 0;
+ while (i < count_a) {
+ Bool add_a = GF_TRUE;
+ u64 *starta = (u64 *)gf_list_get(a->times, i);
+ u64 *enda = (u64 *)gf_list_get(a->times, i+1);
+ while (j < count_b) {
+ u64 *startb = (u64 *)gf_list_get(b->times, j);
+ u64 *endb = (u64 *)gf_list_get(b->times, j+1);
+ if (*enda*b->timescale < *startb*a->timescale) {
+ /* a ends before b starts, there is no overlap, we can add a to the union */
+ gf_list_add(union_ranges->times, starta);
+ gf_list_add(union_ranges->times, enda);
+ add_a = GF_FALSE;
+ /* force to get the next a */
+ /* NOTE(review): i is incremented here AND unconditionally again at the
+  * bottom of the outer loop after this break, skipping one a range -
+  * confirm against a multi-range test case */
+ i+=2;
+ break;
+ } else if (*endb*a->timescale < *starta*b->timescale) {
+ /* b ends before a starts, there is no overlap, we can add b to the union */
+ *startb = (u64)((*startb * a->timescale)*1.0 / b->timescale);
+ gf_list_add(union_ranges->times, startb);
+ *endb = (u64)((*endb * a->timescale)*1.0 / b->timescale);
+ gf_list_add(union_ranges->times, endb);
+ j+=2;
+ } else { /* there is some overlap */
+ if (*starta*b->timescale <= *startb*a->timescale) { /* the overlap is at the end of a */
+ if (*endb*a->timescale <= *enda*b->timescale) { /* b is contained in a */
+ /* ignore b, move on to the next b */
+ j+=2;
+ } else { /* *endb > *enda, the overlap is only at the start of b */
+ /* update start of b */
+ *startb = (u64)((*starta * b->timescale)*1.0 / a->timescale);
+ /* ignore a, move on to the next a */
+ i+=2;
+ break;
+ }
+ } else { /* *starta > *startb, the overlap is at the end of b */
+ if (*enda*b->timescale <= *endb*a->timescale) { /* a is contained in b */
+ /* ignore a */
+ add_a = GF_FALSE;
+ /* force to get the next a */
+ i+=2;
+ break;
+ } else { /* *enda > *endb, the overlap is at the beginning of a */
+ /* update start of a */
+ *starta = (u64)((*startb * a->timescale)*1.0 / b->timescale);
+ /* ignore b, move on to the next b */
+ j+=2;
+ }
+ }
+ }
+ }
+ /* we've processed all b, but a has not been added */
+ /* first check if the next a is not contiguous */
+ if (add_a == GF_TRUE && i+2 < count_a) {
+ u64 *next_starta = (u64 *)gf_list_get(a->times, i+2);
+ //u64 *next_enda = (u64 *)gf_list_get(a->times, i+3);
+ if (*enda == *next_starta) {
+ *next_starta = *starta;
+ }
+ /* NOTE(review): add_a is cleared even when the next range is NOT
+  * contiguous, silently dropping this a range - confirm intent */
+ add_a = GF_FALSE;
+ }
+ if (add_a) {
+ gf_list_add(union_ranges->times, starta);
+ gf_list_add(union_ranges->times, enda);
+ }
+ i+=2;
+ }
+ /* NOTE(review): any b ranges remaining after a is exhausted are never
+  * added to the union - confirm intent */
+ gf_html_timeranges_merge(union_ranges);
+ return union_ranges;
+}
+
+/* Returns a newly allocated TimeRanges holding the intersection of a and b.
+ * Boundary value pointers added to the result are ALIASES of entries still
+ * owned by a->times / b->times - callers must not free both sides.
+ * NOTE(review): a is dereferenced (timescale/c/_this) before the "if (a)"
+ * check below, so the NULL check is ineffective - confirm a is never NULL. */
+GF_HTML_MediaTimeRanges *gf_html_timeranges_intersection(GF_HTML_MediaTimeRanges *a, GF_HTML_MediaTimeRanges *b)
{
- gf_html_timeranges_reset(range);
- gf_list_del(range->times);
- range->times = NULL;
+ GF_HTML_MediaTimeRanges *intersection_ranges;
+ u32 i, j, count_a, count_b;
+ intersection_ranges = gf_html_timeranges_new(a->timescale);
+ intersection_ranges->c = a->c;
+ intersection_ranges->_this = a->_this;
+ count_a = 0;
+ count_b = 0;
+ if (a) count_a = gf_list_count(a->times);
+ if (b) count_b = gf_list_count(b->times);
+ if (count_a != 0 && count_b != 0) {
+ i = 0;
+ j = 0;
+ /* NOTE(review): if the inner loop exhausts b while i < count_a, nothing
+  * increments i and this outer loop never terminates - confirm */
+ while (i < count_a) {
+ u64 *starta = (u64 *)gf_list_get(a->times, i);
+ u64 *enda = (u64 *)gf_list_get(a->times, i+1);
+ while (j < count_b) {
+ u64 *startb = (u64 *)gf_list_get(b->times, j);
+ u64 *endb = (u64 *)gf_list_get(b->times, j+1);
+ if (*enda*b->timescale < *startb*a->timescale) {
+ /* this is no intersection with this a */
+ /* force to get the next a */
+ i+=2;
+ break;
+ } else if (*endb*a->timescale < *starta*b->timescale) {
+ /* this is no intersection with this b */
+ j+=2;
+ } else { /* there is an intersection */
+ /* NOTE(review): b boundary values are added without conversion to
+  * a's timescale, unlike gf_html_timeranges_union - confirm */
+ if (*starta*b->timescale <= *startb*a->timescale) { /* the intersection starts at the beginning of b */
+ gf_list_add(intersection_ranges->times, startb);
+ if (*endb*a->timescale <= *enda*b->timescale) { /* b is contained in a */
+ gf_list_add(intersection_ranges->times, endb);
+ *starta = (u64)((*endb * a->timescale)*1.0 / b->timescale);
+ /* move on to the next b */
+ j+=2;
+ } else { /* *endb > *enda, the intersection ends at the end of a */
+ gf_list_add(intersection_ranges->times, enda);
+ /* update start of b */
+ *startb = (u64)((*enda * b->timescale)*1.0 / a->timescale);
+ /* move on to the next a */
+ i+=2;
+ break;
+ }
+ } else { /* *starta > *startb, the intersection starts at the beginning of a */
+ /* NOTE(review): the comment says the intersection starts at a, yet
+  * startb is added here - looks like it should be starta; confirm */
+ gf_list_add(intersection_ranges->times, startb);
+ if (*enda*b->timescale <= *endb*a->timescale) { /* a is contained in b */
+ gf_list_add(intersection_ranges->times, enda);
+ *startb = (u64)((*enda * b->timescale)*1.0 / a->timescale);
+ /* move on to the next a */
+ i+=2;
+ break;
+ } else { /* *enda > *endb, the intersection ends at the end of b */
+ gf_list_add(intersection_ranges->times, endb);
+ /* update start of a */
+ *starta = (u64)((*endb * a->timescale)*1.0 / b->timescale);
+ /* move on to the next b */
+ j+=2;
+ }
+ }
+ }
+ }
+ }
+ }
+ return intersection_ranges;
}
GF_HTML_Track *html_media_add_new_track_to_list(GF_HTML_TrackList *tracklist,
me->audioTracks.tracks = gf_list_new();
me->videoTracks.tracks = gf_list_new();
me->textTracks.tracks = gf_list_new();
- me->buffered.times = gf_list_new();
- me->played.times = gf_list_new();
- me->seekable.times = gf_list_new();
+ me->buffered = gf_html_timeranges_new(1);
+ me->played = gf_html_timeranges_new(1);
+ me->seekable = gf_html_timeranges_new(1);
return me;
}
gf_html_tracklist_del(&me->audioTracks);
gf_html_tracklist_del(&me->videoTracks);
gf_html_tracklist_del(&me->textTracks);
- gf_html_timeranges_del(&me->buffered);
- gf_html_timeranges_del(&me->seekable);
- gf_html_timeranges_del(&me->played);
+ gf_html_timeranges_del(me->buffered);
+ gf_html_timeranges_del(me->seekable);
+ gf_html_timeranges_del(me->played);
gf_free(me);
}
{
GF_HTML_MediaController *mc;
GF_SAFEALLOC(mc, GF_HTML_MediaController);
- mc->buffered.times = gf_list_new();
- mc->played.times = gf_list_new();
- mc->seekable.times = gf_list_new();
+ mc->buffered = gf_html_timeranges_new(1);
+ mc->played = gf_html_timeranges_new(1);
+ mc->seekable = gf_html_timeranges_new(1);
return mc;
}
GF_HTML_MediaElement *me = (GF_HTML_MediaElement *)gf_list_get(mc->media_elements, i);
me->controller = NULL;
}
- gf_html_timeranges_del(&mc->buffered);
- gf_html_timeranges_del(&mc->seekable);
- gf_html_timeranges_del(&mc->played);
+ gf_html_timeranges_del(mc->buffered);
+ gf_html_timeranges_del(mc->seekable);
+ gf_html_timeranges_del(mc->played);
gf_free(mc);
}
#endif
-GF_DOMEventTarget *gf_html_media_get_event_target_from_node(GF_Node *n) {
- GF_DOMEventTarget *target = NULL;
- //GF_HTML_MediaElement *me = html_media_element_get_from_node(c, n);
- //*target = me->evt_target;
-#ifndef GPAC_DISABLE_SVG
- if (!n->sgprivate->interact) {
- GF_SAFEALLOC(n->sgprivate->interact, struct _node_interactive_ext);
- }
- if (!n->sgprivate->interact->dom_evt) {
- n->sgprivate->interact->dom_evt = gf_dom_event_target_new(GF_DOM_EVENT_TARGET_HTML_MEDIA, n);
- }
- target = n->sgprivate->interact->dom_evt;
-#endif
- return target;
-}
-
ms->activeSourceBuffers.evt_target = gf_dom_event_target_new(GF_DOM_EVENT_TARGET_MSE_SOURCEBUFFERLIST, &ms->activeSourceBuffers);
ms->reference_count = 1;
ms->evt_target = gf_dom_event_target_new(GF_DOM_EVENT_TARGET_MSE_MEDIASOURCE, ms);
+ ms->durationType = DURATION_NAN;
return ms;
}
}
}
-static void gf_mse_fire_event(GF_DOMEventTarget *target, GF_EventType event_type)
+void gf_mse_fire_event(GF_DOMEventTarget *target, GF_EventType event_type)
{
GF_SceneGraph *sg = NULL;
GF_DOM_Event mse_event;
break;
}
assert(sg);
- sg_fire_dom_event(target, &mse_event, sg, NULL);
+ gf_sg_fire_dom_event(target, &mse_event, sg, NULL);
}
GF_EXPORT
GF_SAFEALLOC(source, GF_HTML_SourceBuffer);
sprintf(name, "SourceBuffer_Thread_%p", source);
source->mediasource = mediasource;
- source->buffered.times = gf_list_new();
+ source->buffered = gf_html_timeranges_new(1);
source->input_buffer = gf_list_new();
source->tracks = gf_list_new();
+ source->threads = gf_list_new();
source->parser_thread = gf_th_new(name);
source->remove_thread = gf_th_new(name);
source->append_mode = MEDIA_SOURCE_APPEND_MODE_SEGMENTS;
source->appendWindowStart = 0;
- source->appendWindowEnd = GF_MAX_DOUBLE;
+ source->appendWindowEnd = GF_MAX_DOUBLE;
source->evt_target = gf_dom_event_target_new(GF_DOM_EVENT_TARGET_MSE_SOURCEBUFFER, source);
+ source->timescale = 1;
return source;
}
-void gf_mse_add_source_buffer(GF_HTML_MediaSource *ms, GF_HTML_SourceBuffer *sb)
+/* Appends sb to the MediaSource's sourceBuffers list and fires the
+ * corresponding addsourcebuffer event on that list */
+void gf_mse_mediasource_add_source_buffer(GF_HTML_MediaSource *ms, GF_HTML_SourceBuffer *sb)
{
gf_list_add(ms->sourceBuffers.list, sb);
gf_mse_fire_event(ms->sourceBuffers.evt_target, GF_EVENT_HTML_MSE_ADD_SOURCE_BUFFER);
}
+/* Not yet used
+void gf_mse_add_active_source_buffer(GF_HTML_MediaSource *ms, GF_HTML_SourceBuffer *sb)
+{
+ gf_list_add(ms->activeSourceBuffers.list, sb);
+ gf_mse_fire_event(ms->activeSourceBuffers.evt_target, GF_EVENT_HTML_MSE_ADD_SOURCE_BUFFER);
+} */
+
+/* Removes sb from the MediaSource's activeSourceBuffers list (if present)
+ * and fires the removesourcebuffer event; no-op when sb is not active */
+void gf_mse_remove_active_source_buffer(GF_HTML_MediaSource *ms, GF_HTML_SourceBuffer *sb) {
+ s32 activePos;
+ activePos = gf_list_find(ms->activeSourceBuffers.list, sb);
+ if (activePos >= 0) {
+ gf_list_rem(ms->activeSourceBuffers.list, activePos);
+ gf_mse_fire_event(ms->activeSourceBuffers.evt_target, GF_EVENT_HTML_MSE_REMOVE_SOURCE_BUFFER);
+ }
+}
+
static void gf_mse_reset_input_buffer(GF_List *input_buffer)
{
while (gf_list_count(input_buffer)) {
}
}
+/* Deletes all unparsed data buffers from all tracks in the source buffer */
+static void gf_mse_source_buffer_reset_parser(GF_HTML_SourceBuffer *sb)
+{
+ u32 i, track_count;
+ track_count = gf_list_count(sb->tracks);
+
+ /* wait until all remaining entire AU are parsed and then flush the remaining bytes in the parser */
+
+ /* reset per-track timestamp/RAP tracking so the next append restarts
+  * cleanly (waiting for a random access point) */
+ for (i = 0; i < track_count; i++)
+ {
+ GF_HTML_Track *track = (GF_HTML_Track *)gf_list_get(sb->tracks, i);
+ track->last_dts_set = GF_FALSE;
+ track->highest_pts_set = GF_FALSE;
+ track->needs_rap = GF_TRUE;
+ }
+ sb->group_end_timestamp_set = GF_FALSE;
+ gf_mse_reset_input_buffer(sb->input_buffer);
+ sb->append_state = MEDIA_SOURCE_APPEND_STATE_WAITING_FOR_SEGMENT;
+}
+
+/* Implements the SourceBuffer abort() steps: if an append is in progress,
+ * stop it and fire abort + updateend, then reset the parser state and the
+ * append window. Always returns GF_OK. */
+GF_Err gf_mse_source_buffer_abort(GF_HTML_SourceBuffer *sb)
+{
+ if (sb->updating) {
+ /* setting to false should stop the parsing thread */
+ /* NOTE(review): this flag is read from another thread without any
+  * synchronization visible here - confirm memory-visibility guarantees */
+ sb->updating = GF_FALSE;
+ gf_mse_fire_event(sb->evt_target, GF_EVENT_HTML_MSE_UPDATE_ABORT);
+ gf_mse_fire_event(sb->evt_target, GF_EVENT_HTML_MSE_UPDATE_END);
+ }
+ gf_mse_source_buffer_reset_parser(sb);
+ sb->appendWindowStart = 0;
+ sb->appendWindowEnd = GF_MAX_DOUBLE;
+ return GF_OK;
+}
+
+/* Detaches sb from the MediaSource: aborts any pending append, removes it
+ * from both the active and full sourceBuffers lists (firing the matching
+ * events), then destroys it. Returns GF_NOT_FOUND if sb is not attached. */
+GF_Err gf_mse_remove_source_buffer(GF_HTML_MediaSource *ms, GF_HTML_SourceBuffer *sb) {
+ s32 pos;
+ pos = gf_list_find(ms->sourceBuffers.list, sb);
+ if (pos < 0) {
+ return GF_NOT_FOUND;
+ } else {
+ gf_mse_source_buffer_abort(sb);
+ /* TODO: update the audio/video/text tracks */
+ gf_mse_remove_active_source_buffer(ms, sb);
+ gf_list_rem(ms->sourceBuffers.list, pos);
+ gf_mse_fire_event(ms->sourceBuffers.evt_target, GF_EVENT_HTML_MSE_REMOVE_SOURCE_BUFFER);
+ gf_mse_source_buffer_del(sb);
+ }
+ return GF_OK;
+}
+
+/* TODO: not yet used
+void gf_mse_detach(GF_HTML_MediaSource *ms) {
+ u32 count;
+ u32 i;
+ GF_HTML_SourceBuffer *sb;
+ ms->readyState = MEDIA_SOURCE_READYSTATE_CLOSED;
+ ms->durationType = DURATION_NAN;
+ count = gf_list_count(ms->sourceBuffers.list);
+ for (i = 0; i < count; i++) {
+ sb = (GF_HTML_SourceBuffer *)gf_list_get(ms->sourceBuffers.list, i);
+ gf_mse_remove_source_buffer(ms, sb);
+ }
+} */
+
void gf_mse_source_buffer_del(GF_HTML_SourceBuffer *sb)
{
GF_HTML_TrackList tlist;
- gf_html_timeranges_del(&sb->buffered);
+ gf_html_timeranges_del(sb->buffered);
gf_mse_reset_input_buffer(sb->input_buffer);
gf_list_del(sb->input_buffer);
tlist.tracks = sb->tracks;
gf_html_tracklist_del(&tlist);
+ {
+ u32 i, count;
+ count = gf_list_count(sb->threads);
+ for(i = 0; i < count; i++) {
+ GF_Thread *t = (GF_Thread *)gf_list_get(sb->threads, i);
+ gf_th_del(t);
+ }
+ gf_list_del(sb->threads);
+ }
gf_th_del(sb->parser_thread);
gf_th_del(sb->remove_thread);
return GF_OK;
}
+/* NOTE(review): both macros expand a local variable literally named 'track';
+ * they only work inside functions that declare such a variable */
+#define SECONDS_TO_TIMESCALE(s) ((s)*track->timescale)
+#define TIMESCALE_TO_SECONDS(u) ((u)*1.0/track->timescale)
+
+
+/* Builds a TimeRanges (in the track's timescale) describing the contiguous
+ * spans covered by the packets currently buffered on the track.
+ * Assumes track->buffer is sorted by ascending CTS - TODO confirm; packets
+ * whose CTS falls strictly inside the current range do not extend it.
+ * Caller is expected to hold track->buffer_mutex - confirm at call sites. */
+GF_HTML_MediaTimeRanges *gf_mse_timeranges_from_track_packets(GF_HTML_Track *track) {
+ u32 i, count;
+ GF_HTML_MediaTimeRanges *ranges;
+ u64 start;
+ u64 end=0;
+ Bool end_set = GF_FALSE;
+ GF_MSE_Packet *packet;
+
+ ranges = gf_html_timeranges_new(track->timescale);
+ count = gf_list_count(track->buffer);
+ for (i = 0; i < count; i++) {
+ packet = (GF_MSE_Packet *)gf_list_get(track->buffer, i);
+ if (end_set == GF_FALSE|| packet->sl_header.compositionTimeStamp > end) {
+ /* gap (or first packet): close the previous range and open a new one */
+ if (end_set == GF_TRUE) {
+ gf_html_timeranges_add_end(ranges, end);
+ }
+ start = packet->sl_header.compositionTimeStamp;
+ gf_html_timeranges_add_start(ranges, start);
+ end = packet->sl_header.compositionTimeStamp + packet->sl_header.au_duration;
+ end_set = GF_TRUE;
+ } else if (packet->sl_header.compositionTimeStamp == end) {
+ /* contiguous packet: extend the current range */
+ end = packet->sl_header.compositionTimeStamp + packet->sl_header.au_duration;
+ }
+ }
+ if (end_set == GF_TRUE) {
+ gf_html_timeranges_add_end(ranges, end);
+ }
+ return ranges;
+}
+
/* Traverses the list of Access Units already demuxed & parsed to update the buffered status */
void gf_mse_source_buffer_update_buffered(GF_HTML_SourceBuffer *sb) {
u32 i;
u32 track_count;
- double start= 0;
- double end = 0;
- Bool start_set = GF_FALSE;
- Bool end_set = GF_FALSE;
- u64 au_dur = 0;
- double packet_start;
- double packet_end;
-
- /* cleaning the current list */
- gf_html_timeranges_reset(&(sb->buffered));
-
- /* merging the start and end for all tracks */
+
track_count = gf_list_count(sb->tracks);
+ gf_html_timeranges_reset(sb->buffered);
for (i = 0; i < track_count; i++) {
- u32 j;
- u32 packet_count;
+ GF_HTML_MediaTimeRanges *track_ranges;
GF_HTML_Track *track = (GF_HTML_Track *)gf_list_get(sb->tracks, i);
gf_mx_p(track->buffer_mutex);
- packet_count = gf_list_count(track->buffer);
- au_dur = 0;
- for (j = 0; j < packet_count; j++) {
- GF_MSE_Packet *packet = (GF_MSE_Packet *)gf_list_get(track->buffer, j);
- if (packet) {
- packet_start = (packet->sl_header.compositionTimeStamp * 1.0 )/ track->timescale;
- if (packet->sl_header.au_duration) {
- au_dur = packet->sl_header.au_duration;
- } else {
- if (j > 0) {
- GF_MSE_Packet *prev = (GF_MSE_Packet *)gf_list_get(track->buffer, j-1);
- au_dur = packet->sl_header.decodingTimeStamp - prev->sl_header.decodingTimeStamp;
- }
- }
- packet_end = ((packet->sl_header.compositionTimeStamp + au_dur) * 1.0) / track->timescale;
- if (!start_set) {
- start = packet_start;
- start_set = GF_TRUE;
- } else {
- if (start > packet_start) {
- start = packet_start;
- }
- }
- if (!end_set) {
- end = packet_end;
- end_set = GF_TRUE;
- } else {
- if (end < packet_end) {
- end = packet_end;
- }
- }
- }
- }
+ /* the buffered attribute is the intersection of all tracks' ranges */
+ track_ranges = gf_mse_timeranges_from_track_packets(track);
+ if (i != 0) {
+ GF_HTML_MediaTimeRanges *tmp;
+ tmp = gf_html_timeranges_intersection(sb->buffered, track_ranges);
+ /* NOTE(review): the intersection aliases u64 pointers owned by
+  * track_ranges; deleting track_ranges below frees those values and may
+  * leave dangling entries inside tmp/sb->buffered - confirm ownership
+  * semantics of gf_html_timeranges_intersection */
+ gf_html_timeranges_del(track_ranges);
+ /* NOTE(review): gf_list_del only frees the list, not its elements;
+  * boundary values of the previous buffered list not aliased into tmp
+  * appear to be leaked here - confirm */
+ gf_list_del(sb->buffered->times);
+ sb->buffered->times = tmp->times;
+ sb->buffered->timescale = tmp->timescale;
+ gf_free(tmp);
+ } else {
+ /* first track: adopt its ranges wholesale */
+ gf_list_del(sb->buffered->times);
+ sb->buffered->times = track_ranges->times;
+ sb->buffered->timescale = track_ranges->timescale;
+ gf_free(track_ranges);
+ }
gf_mx_v(track->buffer_mutex);
}
-
- /* Creating only one range for now */
- if (start_set && end_set) {
- gf_media_time_ranges_add(&sb->buffered, start, end);
- }
}
-/* Deletes all unparsed data buffers from all tracks in the source buffer */
-static void gf_mse_source_buffer_reset_parser(GF_HTML_SourceBuffer *sb)
-{
- u32 i, track_count;
- track_count = gf_list_count(sb->tracks);
-
- /* wait until all remaining entire AU are parsed and then flush the remaining bytes in the parser */
-
- for (i = 0; i < track_count; i++)
- {
- GF_HTML_Track *track = (GF_HTML_Track *)gf_list_get(sb->tracks, i);
- track->last_dts_set = GF_FALSE;
- track->highest_pts_set = GF_FALSE;
- track->needs_rap = GF_TRUE;
- }
- sb->highest_end_timestamp_set = GF_FALSE;
- gf_mse_reset_input_buffer(sb->input_buffer);
- sb->append_state = MEDIA_SOURCE_APPEND_STATE_WAITING_FOR_SEGMENT;
+/* Sets the SourceBuffer timestamp offset from a value in seconds and
+ * propagates it to each track, each expressed in its own timescale. In
+ * sequence mode this also (re)arms the group start timestamp. */
+void gf_mse_source_buffer_set_timestampOffset(GF_HTML_SourceBuffer *sb, double d) {
+ u32 i;
+ sb->timestampOffset = (s64)(d*sb->timescale);
+ if (sb->append_mode == MEDIA_SOURCE_APPEND_MODE_SEQUENCE) {
+ sb->group_start_timestamp_flag = GF_TRUE;
+ sb->group_start_timestamp = sb->timestampOffset;
+ }
+ for (i = 0; i < gf_list_count(sb->tracks); i++) {
+ GF_HTML_Track *track = (GF_HTML_Track *)gf_list_get(sb->tracks, i);
+ /* FIX: sb->timestampOffset is in sb->timescale units; multiplying it
+  * again by track->timescale mixed units whenever sb->timescale != 1.
+  * Derive the track offset from the seconds value directly instead. */
+ track->timestampOffset = (s64)(d*track->timescale);
+ }
+}
-GF_Err gf_mse_source_buffer_abort(GF_HTML_SourceBuffer *sb)
-{
-/*
-if (sb->continuation_timestamp_flag == GF_FALSE)
- {
- if (sb->abort_mode == MEDIA_SOURCE_ABORT_MODE_CONTINUATION && !sb->highest_end_timestamp_set)
- {
- return GF_BAD_PARAM;
- }
+/* Rescales all timestamp state stored on the SourceBuffer from its previous
+ * timescale to new_timescale (offset, group start/end, pending remove range).
+ * No-op when the timescale is unchanged; assumes the previous timescale is
+ * non-zero (it is initialized to 1 at creation). */
+void gf_mse_source_buffer_set_timescale(GF_HTML_SourceBuffer *sb, u32 new_timescale) {
+ u32 old_timescale = sb->timescale;
+ if (old_timescale == new_timescale) return;
+ sb->timescale = new_timescale;
+ sb->timestampOffset = (s64)((sb->timestampOffset * new_timescale * 1.0)/old_timescale);
+ if (sb->group_start_timestamp_flag) {
+ sb->group_start_timestamp = (u64)((sb->group_start_timestamp * new_timescale * 1.0)/old_timescale);
+ }
+ if (sb->group_end_timestamp_set) {
+ sb->group_end_timestamp = (u64)((sb->group_end_timestamp * new_timescale * 1.0)/old_timescale);
+ }
+ sb->remove_start = (u64)((sb->remove_start * new_timescale * 1.0)/old_timescale);
+ sb->remove_end = (u64)((sb->remove_end * new_timescale * 1.0)/old_timescale);
- if (sb->highest_end_timestamp_set) {
- sb->continuation_timestamp = sb->highest_end_timestamp;
- sb->continuation_timestamp_flag = GF_TRUE;
- }
- }
-// sb->abort_mode = mode;
-*/
- gf_mse_source_buffer_set_update(sb, GF_FALSE);
- sb->appendWindowStart = 0;
- sb->appendWindowEnd = GF_MAX_DOUBLE;
- /*fire abort event at the SourceBuffer */
- gf_mse_source_buffer_reset_parser(sb);
- return GF_OK;
}
void gf_mse_packet_del(GF_MSE_Packet *packet) {
found_previous = GF_TRUE;
}
if (found_previous == GF_TRUE && p->sl_header.compositionTimeStamp > packet->sl_header.compositionTimeStamp) {
+ gf_mx_v(track->buffer_mutex);
return p;
}
}
u64 to)
{
u32 i;
- u32 frame_count;
gf_mx_p(track->buffer_mutex);
- frame_count = gf_list_count(track->buffer);
- for (i = 0; i < frame_count; i++) {
+ i = 0;
+ while (i < gf_list_count(track->buffer)) {
GF_MSE_Packet *frame = (GF_MSE_Packet *)gf_list_get(track->buffer, i);
- if (frame->sl_header.compositionTimeStamp >= from && frame->sl_header.compositionTimeStamp < to) {
- GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MSE] Removing frame %g (%d frames)\n", (frame->sl_header.compositionTimeStamp*1.0)/track->timescale, gf_list_count(track->buffer)));
+ if (frame->sl_header.compositionTimeStamp >= to) {
+ break;
+ } else if (frame->sl_header.compositionTimeStamp >= from && frame->sl_header.compositionTimeStamp < to) {
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MSE] Removing frame with PTS %g s (%d frames remaining)\n", TIMESCALE_TO_SECONDS(frame->sl_header.compositionTimeStamp), gf_list_count(track->buffer)));
gf_list_rem(track->buffer, i);
- }
+ } else {
+ i++;
+ }
}
gf_mx_v(track->buffer_mutex);
}
+/* Inserts a frame into the track buffer, keeping the buffer ordered by
+ * decoding timestamp (linear-scan insertion). When the inserted frame or its
+ * predecessor has no duration yet, the duration is (re)computed from the DTS
+ * delta to the following frame. Thread-safe via track->buffer_mutex. */
+static void gf_mse_track_buffer_add_packet(GF_HTML_Track *track, GF_MSE_Packet *frame)
+{
+ u32 i, count;
+ Bool inserted = GF_FALSE;
+
+ gf_mx_p(track->buffer_mutex);
+ /* TODO: improve insertion*/
+ count = gf_list_count(track->buffer);
+ for (i = 0; i < count; i++) {
+ GF_MSE_Packet *next_frame = (GF_MSE_Packet *)gf_list_get(track->buffer, i);
+ /* insert before the first buffered frame with a larger DTS */
+ if (frame->sl_header.decodingTimeStamp < next_frame->sl_header.decodingTimeStamp) {
+ gf_list_insert(track->buffer, frame, i);
+ /* if the frame had no duration, we can now tell its duration because of the next frame */
+ if (!frame->sl_header.au_duration) {
+ frame->sl_header.au_duration = (u32)(next_frame->sl_header.decodingTimeStamp - frame->sl_header.decodingTimeStamp);
+ /* we need also to check the duration of the previous frame */
+ if (i > 0) {
+ GF_MSE_Packet *prev_frame = (GF_MSE_Packet *)gf_list_get(track->buffer, i-1);
+ /* we update the frame duration if the newly inserted frame modifies it */
+ if (!prev_frame->sl_header.au_duration ||
+ prev_frame->sl_header.au_duration > frame->sl_header.decodingTimeStamp - prev_frame->sl_header.decodingTimeStamp) {
+ prev_frame->sl_header.au_duration = (u32)(frame->sl_header.decodingTimeStamp - prev_frame->sl_header.decodingTimeStamp);
+ }
+ }
+ }
+ inserted = GF_TRUE;
+ break;
+ }
+ }
+ if (!inserted) {
+ gf_list_add(track->buffer, frame);
+ /* if the frame is inserted last, we cannot know its duration until a new frame is appended or unless the transport format carried it */
+ count = gf_list_count(track->buffer);
+ if (count > 1) {
+ GF_MSE_Packet *prev_frame = (GF_MSE_Packet *)gf_list_get(track->buffer, count-2);
+ /* we update the frame duration if the newly inserted frame modifies it */
+ if (!prev_frame->sl_header.au_duration ||
+ prev_frame->sl_header.au_duration > frame->sl_header.decodingTimeStamp - prev_frame->sl_header.decodingTimeStamp) {
+ prev_frame->sl_header.au_duration = (u32)(frame->sl_header.decodingTimeStamp - prev_frame->sl_header.decodingTimeStamp);
+ }
+ }
+ }
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MSE] Adding frame with PTS %g s and duration %g s (%d frames in buffer)\n", TIMESCALE_TO_SECONDS(frame->sl_header.compositionTimeStamp), TIMESCALE_TO_SECONDS(frame->sl_header.au_duration), gf_list_count(track->buffer)));
+ gf_mx_v(track->buffer_mutex);
+}
+
static GF_Err gf_mse_process_coded_frame(GF_HTML_SourceBuffer *sb,
GF_HTML_Track *track,
GF_MSE_Packet *frame,
Bool *stored)
{
+ s64 PTS_with_offset = frame->sl_header.compositionTimeStamp + frame->sl_header.timeStampOffset;
+ s64 DTS_with_offset = frame->sl_header.decodingTimeStamp + frame->sl_header.timeStampOffset;
*stored = GF_FALSE;
+
if (sb->append_mode == MEDIA_SOURCE_APPEND_MODE_SEQUENCE && sb->group_start_timestamp_flag) {
- sb->timestampOffset = sb->group_start_timestamp - (frame->sl_header.compositionTimeStamp*1.0/track->timescale);
- sb->highest_end_timestamp = sb->group_start_timestamp;
- track->needs_rap = GF_TRUE; /* fix: should be on all track buffers */
+ u32 i, count;
+ /* compute the new offset without taking care of the previous one, since this is a new coded frame group */
+ /* first adjust existing times to the new timescale */
+ gf_mse_source_buffer_set_timescale(sb, track->timescale);
+ sb->timestampOffset = (sb->group_start_timestamp - frame->sl_header.compositionTimeStamp);
+ track->timestampOffset = (sb->group_start_timestamp - frame->sl_header.compositionTimeStamp);
+ sb->group_end_timestamp = sb->group_start_timestamp;
+ count = gf_list_count(sb->tracks);
+ for (i = 0; i < count; i++) {
+ GF_HTML_Track *t = (GF_HTML_Track *)gf_list_get(sb->tracks, i);
+ t->needs_rap = GF_TRUE;
+ }
sb->group_start_timestamp_flag = GF_FALSE;
}
- if (sb->timestampOffset != 0) {
- u64 offset = (u64)((sb->timestampOffset)*track->timescale);
- if (offset > frame->sl_header.compositionTimeStamp || offset > frame->sl_header.decodingTimeStamp) {
- return GF_NON_COMPLIANT_BITSTREAM;
- }
- frame->sl_header.compositionTimeStamp += (u64)(sb->timestampOffset*track->timescale);
- frame->sl_header.decodingTimeStamp += (u64)(sb->timestampOffset*track->timescale);
- /* check if the new CTS/DTS are in range */
+ if (track->timestampOffset != 0) {
+ frame->sl_header.timeStampOffset = track->timestampOffset;
+ PTS_with_offset = frame->sl_header.compositionTimeStamp + frame->sl_header.timeStampOffset;
+ DTS_with_offset = frame->sl_header.decodingTimeStamp + frame->sl_header.timeStampOffset;
}
if (track->last_dts_set) {
- if (track->last_dts*track->timescale > frame->sl_header.decodingTimeStamp) {
- return GF_NON_COMPLIANT_BITSTREAM;
- }
-
- /* why ???
- * If last decode timestamp for track buffer is set and decode timestamp is less than last decode timestamp
- * or the difference between decode timestamp and last decode timestamp is greater than 100 milliseconds,
- * then call endOfStream("decode") and abort these steps.
- */
- if (frame->sl_header.decodingTimeStamp - track->last_dts*track->timescale > 0.1*track->timescale) {
- return GF_NON_COMPLIANT_BITSTREAM;
+ if (DTS_with_offset < (s64) track->last_dts ||
+ DTS_with_offset - track->last_dts > 2*track->last_dur) {
+ /* A discontinuity in the timestamps is detected, this triggers the start of a new coded frame group */
+ if (sb->append_mode == MEDIA_SOURCE_APPEND_MODE_SEGMENTS) {
+ /* the current group ends at the start of this frame */
+ /* check if sb.timescale has to be adjusted first with gf_mse_source_buffer_set_timescale(sb, track->timescale);*/
+ sb->group_end_timestamp = PTS_with_offset;
+ sb->group_end_timestamp_set = GF_TRUE;
+ } else { /* sb->append_mode == MEDIA_SOURCE_APPEND_MODE_SEQUENCE */
+ /* check if sb.timescale has to be adjusted first with gf_mse_source_buffer_set_timescale(sb, track->timescale);*/
+ sb->group_start_timestamp = sb->group_end_timestamp;
+ sb->group_start_timestamp_flag = GF_TRUE;
+ }
+ {
+ u32 i, count;
+ count = gf_list_count(sb->tracks);
+ for (i = 0; i < count; i++) {
+ GF_HTML_Track *t = (GF_HTML_Track *)gf_list_get(sb->tracks, i);
+ t->last_dts_set = GF_FALSE;
+ t->last_dts = 0;
+ t->last_dur = 0;
+ t->highest_pts_set = GF_FALSE;
+ t->highest_pts = 0;
+ t->needs_rap = GF_TRUE;
+ }
+ }
+ return gf_mse_process_coded_frame(sb, track, frame, stored);
}
}
+
+ /* we only update the timestamps in the frame when we are sure the offset is the right one */
+ frame->sl_header.compositionTimeStamp += frame->sl_header.timeStampOffset;
+ frame->sl_header.decodingTimeStamp += frame->sl_header.timeStampOffset;
+ frame->sl_header.timeStampOffset = 0;
- if (frame->sl_header.compositionTimeStamp < sb->appendWindowStart*track->timescale) {
+ if (frame->sl_header.compositionTimeStamp < SECONDS_TO_TIMESCALE(sb->appendWindowStart)) {
track->needs_rap = GF_TRUE;
return GF_OK;
}
- if (frame->sl_header.compositionTimeStamp /* + dur */ > sb->appendWindowEnd*track->timescale) {
+ if (frame->sl_header.compositionTimeStamp + frame->sl_header.au_duration > SECONDS_TO_TIMESCALE(sb->appendWindowEnd)) {
track->needs_rap = GF_TRUE;
return GF_OK;
}
GF_MSE_Packet *overlapped_packet;
overlapped_packet = gf_mse_find_overlapped_packet(track, frame);
if (overlapped_packet) {
- gf_mse_remove_frames_from_to(track, overlapped_packet->sl_header.compositionTimeStamp, overlapped_packet->sl_header.compositionTimeStamp + (u64)(0.000001*track->timescale));
+ gf_mse_remove_frames_from_to(track, overlapped_packet->sl_header.compositionTimeStamp,
+ overlapped_packet->sl_header.compositionTimeStamp + (u64)SECONDS_TO_TIMESCALE(0.000001));
}
}
if (!track->highest_pts_set) {
/* this is the first time a frame is processed in the append sequence */
- gf_mse_remove_frames_from_to(track, frame->sl_header.compositionTimeStamp, frame->sl_header.compositionTimeStamp /* + dur */);
- } else if (track->highest_pts*track->timescale <= frame->sl_header.compositionTimeStamp) {
+ gf_mse_remove_frames_from_to(track, frame->sl_header.compositionTimeStamp, frame->sl_header.compositionTimeStamp + frame->sl_header.au_duration);
+ } else if (track->highest_pts <= frame->sl_header.compositionTimeStamp) {
/* the highest pts has already been set in this append sequence, so we just need to remove frames from that point on, it's safe */
- gf_mse_remove_frames_from_to(track, (u64)(track->highest_pts*track->timescale), (u64)(track->highest_pts*track->timescale) /* + dur */);
+ gf_mse_remove_frames_from_to(track, track->highest_pts, track->highest_pts + track->last_dur);
}
- /* remove dependencies: no way !! */
+ /* remove dependencies !! */
/* TODO: spliced frames */
*stored = GF_TRUE;
- gf_mx_p(track->buffer_mutex);
- gf_list_add(track->buffer, frame);
- GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MSE] Adding frame %g (%d frames)\n", (frame->sl_header.compositionTimeStamp*1.0)/track->timescale, gf_list_count(track->buffer)));
- gf_mx_v(track->buffer_mutex);
+ /* adds the packet and update the previous frame duration */
+ gf_mse_track_buffer_add_packet(track, frame);
- track->last_dts = (frame->sl_header.decodingTimeStamp*1.0/track->timescale);
+ track->last_dts = frame->sl_header.decodingTimeStamp;
track->last_dts_set = GF_TRUE;
+ if (frame->sl_header.au_duration) {
+ track->last_dur = frame->sl_header.au_duration;
+ } else {
+ /* assuming CFR - FIXME */
+ frame->sl_header.au_duration = track->last_dur;
+ }
- if (!track->highest_pts_set || (frame->sl_header.compositionTimeStamp /* + dur */) > track->highest_pts*track->timescale) {
+ if (!track->highest_pts_set ||
+ (frame->sl_header.compositionTimeStamp + track->last_dur) > track->highest_pts) {
track->highest_pts_set = GF_TRUE;
- track->highest_pts = (frame->sl_header.compositionTimeStamp*1.0/track->timescale /* + dur */);
+ track->highest_pts = frame->sl_header.compositionTimeStamp + frame->sl_header.au_duration;
}
- if (!sb->highest_end_timestamp_set || (frame->sl_header.compositionTimeStamp*1.0 /* + dur */) > sb->highest_end_timestamp * track->timescale) {
- sb->highest_end_timestamp_set = GF_TRUE;
- sb->highest_end_timestamp = (frame->sl_header.compositionTimeStamp*1.0/track->timescale /* + dur */);
+ if (!sb->group_end_timestamp_set || (frame->sl_header.compositionTimeStamp + frame->sl_header.au_duration > sb->group_end_timestamp)) {
+ /* check if sb.timescale has to be adjusted first with gf_mse_source_buffer_set_timescale(sb, track->timescale);*/
+ sb->group_end_timestamp = frame->sl_header.compositionTimeStamp + frame->sl_header.au_duration;
+ sb->group_end_timestamp_set = GF_TRUE;
}
return GF_OK;
* AU are placed as GF_MSE_Packets in the track buffer
*/
track_count = gf_list_count(sb->tracks);
- while (1) {
+ while (sb->updating) {
u32 track_with_data = 0;
for (i = 0; i < track_count; i++) {
Bool stored = GF_FALSE;
char *data;
assert(packet->is_new_data && packet->size);
data = (char *)gf_malloc(packet->size);
- GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MSE] New AU parsed %g\n", (packet->sl_header.compositionTimeStamp*1.0/track->timescale)));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MSE] New AU parsed with PTS %g s\n", TIMESCALE_TO_SECONDS(packet->sl_header.compositionTimeStamp)));
memcpy(data, packet->data, packet->size);
packet->data = data;
gf_mse_process_coded_frame(sb, track, packet, &stored);
gf_list_add(sb->input_buffer, buffer);
/* Call the parser (asynchronously) and return */
/* the updating attribute will be positioned back to 0 when the parser is done */
- gf_th_run(sb->parser_thread, gf_mse_parse_segment, sb);
-}
-
-/*
-FIXME : Unused function, create warnings on debian
-static void gf_mse_source_buffer_append_error(GF_HTML_SourceBuffer *sb)
-{
- sb->updating = GF_FALSE;
- gf_mse_source_buffer_reset_parser(sb);
- TODO: fire events
+ {
+ GF_Thread *t = gf_th_new(NULL);
+ gf_list_add(sb->threads, t);
+ gf_th_run(t, gf_mse_parse_segment, sb);
+ }
}
-*/
/* Threaded function called upon request from JS
- Removes data in each track buffer until the next RAP is found */
-u32 gf_mse_source_buffer_remove(void *par)
+static u32 gf_mse_source_buffer_remove(void *par)
{
GF_HTML_SourceBuffer *sb = (GF_HTML_SourceBuffer *)par;
u32 i;
u32 track_count;
u32 frame_count;
u64 end = 0;
- //Bool end_set;
+
+ gf_mse_source_buffer_set_update(sb, GF_TRUE);
GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MSE] Removing media until next RAP\n"));
track_count = gf_list_count(sb->tracks);
for (i = 0; i < track_count; i++) {
GF_HTML_Track *track = (GF_HTML_Track *)gf_list_get(sb->tracks, i);
- //end_set = GF_FALSE;
-
+ gf_mse_source_buffer_set_timescale(sb, track->timescale);
/* find the next random access point */
gf_mx_p(track->buffer_mutex);
frame_count = gf_list_count(track->buffer);
for (j = 0; j < frame_count; j++) {
GF_MSE_Packet *frame = (GF_MSE_Packet *)gf_list_get(track->buffer, j);
- if ((frame->sl_header.randomAccessPointFlag &&
- frame->sl_header.compositionTimeStamp >= sb->remove_end*track->timescale) ||
- (j == frame_count - 1)) {
+ if (frame->sl_header.randomAccessPointFlag &&
+ frame->sl_header.compositionTimeStamp >= sb->remove_end) {
end = frame->sl_header.compositionTimeStamp;
- //end_set = GF_TRUE;
break;
}
}
gf_mx_v(track->buffer_mutex);
+ if (!end) end = (u64)SECONDS_TO_TIMESCALE(sb->remove_end);
/* remove up to the next RAP found */
- gf_mse_remove_frames_from_to(track, (u64)sb->remove_start, end);
+ gf_mse_remove_frames_from_to(track, sb->remove_start, end);
}
gf_mse_source_buffer_set_update(sb, GF_FALSE);
return 0;
}
+/* MSE SourceBuffer.remove(start, end): converts the range from seconds to
+ * sb->timescale units and runs the actual removal asynchronously on a fresh
+ * thread (gf_mse_source_buffer_remove clears the 'updating' flag when done).
+ * NOTE(review): threads appended to sb->threads are not joined/freed here —
+ * presumably cleaned up elsewhere; verify. */
+void gf_mse_remove(GF_HTML_SourceBuffer *sb, double start, double end)
+{
+ sb->remove_start = (u64)(start*sb->timescale);
+ sb->remove_end = (u64)(end*sb->timescale);
+ {
+ GF_Thread *t = gf_th_new(NULL);
+ gf_list_add(sb->threads, t);
+ gf_th_run(t, gf_mse_source_buffer_remove, sb);
+ }
+}
+
/* Callback functions used by a media parser when parsing events happens */
GF_Err gf_mse_proxy(GF_InputService *parser, GF_NetworkCommand *command)
{
*out_reception_status = packet->status;
*is_new_data = packet->is_new_data;
packet->is_new_data = GF_FALSE;
- GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MSE_IN] Sending AU #%d/%d to decoder with TS: %g \n", track->packet_index, count, (packet->sl_header.compositionTimeStamp*1.0/track->timescale)));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MSE_IN] Sending AU #%d/%d to decoder with PTS %g s\n", track->packet_index, count, TIMESCALE_TO_SECONDS(packet->sl_header.compositionTimeStamp)));
} else {
*out_data_ptr = NULL;
*out_data_size = 0;
gf_mx_v(track->buffer_mutex);
return GF_OK;
}
+
+
#endif
jpx.src.resync_to_restart = jpeg_resync_to_restart;
jpx.src.term_source = gf_jpeg_stub;
jpx.skip = 0;
- jpx.src.next_input_byte = jpg;
+ jpx.src.next_input_byte = (JOCTET *) jpg;
jpx.src.bytes_in_buffer = jpg_size;
jpx.cinfo.src = (void *) &jpx.src;
if (!stricmp(att->value, "ISMA"))info->crypt_type = 1;
else if (!stricmp(att->value, "CENC AES-CTR")) info->crypt_type = 2;
else if (!stricmp(att->value, "CENC AES-CBC")) info->crypt_type = 3;
+ else if (!stricmp(att->value, "ADOBE")) info->crypt_type = 4;
}
}
return;
else if (!strncmp(att->value, "roll=", 5))
tkc->keyRoll = atoi(att->value+5);
}
+ else if (!stricmp(att->name, "metadata")) {
+ tkc->metadata_len = gf_base64_encode(att->value, (u32) strlen(att->value), tkc->metadata, 5000);
+ tkc->metadata[tkc->metadata_len] = 0;
+ }
+ }
+
+ if ((info->crypt_type == 3) && (tkc->IV_size == 8)) {
+ GF_LOG(GF_LOG_WARNING, GF_LOG_AUTHOR, ("[CENC] Using AES-128 CBC: IV_size should be 16\n"));
+ tkc->IV_size = 16;
}
}
}
else {
prev_block_count = BSO / 16;
- remain = BSO % 16;
+ remain = (u32) (BSO % 16);
}
tmp = gf_bs_new(IV, 16, GF_BITSTREAM_READ);
pleintext_bs = gf_bs_new(samp->data, samp->dataLength, GF_BITSTREAM_READ);
cyphertext_bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
sai_bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
- gf_bs_write_data(sai_bs, IV, IV_size);
+ gf_bs_write_data(sai_bs, IV, 16);
subsamples = gf_list_new();
if (!subsamples) {
e = GF_IO_ERR;
if (tci->enc_type == 2)
gf_cenc_encrypt_sample_ctr(mc, samp, is_nalu_video, nalu_size_length, IV, tci->IV_size, &buf, &len, bytes_in_nalhr);
- else if (tci->enc_type == 3)
+ else if (tci->enc_type == 3) {
+ int IV_size = 16;
+ gf_crypt_get_state(mc, IV, &IV_size);
gf_cenc_encrypt_sample_cbc(mc, samp, is_nalu_video, nalu_size_length, IV, tci->IV_size, &buf, &len, bytes_in_nalhr);
+ }
gf_isom_update_sample(mp4, track, i+1, samp, 1);
gf_isom_sample_del(&samp);
gf_bs_del(bs);
gf_crypt_set_state(mc, IV, 17);
}
+ else if (tci->enc_type == 3) {
+ memmove(IV, sai->IV, 16);
+ gf_crypt_set_state(mc, IV, 16);
+ }
e = gf_crypt_set_key(mc, tci->key, 16, IV);
if (e) {
GF_LOG(GF_LOG_ERROR, GF_LOG_AUTHOR, ("[CENC] Cannot set key AES-128 %s (%s)\n", (tci->enc_type == 2) ? "CTR" : "CBC", gf_error_to_string(e)) );
return e;
}
+/* Encrypts one track with Adobe's protection scheme (AES-128 CBC, whole-sample).
+ * Each output sample is prefixed with a 1-byte AU header: 0x10 when encrypted
+ * (followed by the 16-byte IV used for that sample), 0x00 when left in clear.
+ * Samples may be selectively skipped (RAP / non-RAP) per tci->sel_enc_type.
+ * The CBC chain continues across samples: after the first encrypted sample the
+ * IV of the next one is the current cipher state. Returns GF_OK or an error;
+ * a missing trackID is logged and silently skipped (returns GF_OK). */
+GF_Err gf_adobe_encrypt_track(GF_ISOFile *mp4, GF_TrackCryptInfo *tci, void (*progress)(void *cbk, u64 done, u64 total), void *cbk)
+{
+ GF_Err e;
+ char IV[16];
+ GF_ISOSample *samp;
+ GF_Crypt *mc;
+ Bool all_rap = GF_FALSE;
+ u32 i, count, di, track, len;
+ Bool has_crypted_samp;
+ char *buf;
+ GF_BitStream *bs;
+ int IV_size;
+
+ e = GF_OK;
+ samp = NULL;
+ mc = NULL;
+ buf = NULL;
+ bs = NULL;
+
+ track = gf_isom_get_track_by_id(mp4, tci->trackID);
+ if (!track) {
+ GF_LOG(GF_LOG_ERROR, GF_LOG_AUTHOR, ("[Adobe] Cannot find TrackID %d in input file - skipping\n", tci->trackID));
+ return GF_OK;
+ }
+
+ mc = gf_crypt_open("AES-128", "CBC");
+ if (!mc) {
+ GF_LOG(GF_LOG_ERROR, GF_LOG_AUTHOR, ("[Adobe] Cannot open AES-128 CBC \n"));
+ e = GF_IO_ERR;
+ goto exit;
+ }
+
+ /*Adobe's protection scheme does not support selective key*/
+ memcpy(tci->key, tci->keys[0], 16);
+
+ /* write the Adobe protection signaling (scheme type + metadata) in the track */
+ e = gf_isom_set_adobe_protection(mp4, track, 1, GF_ISOM_ADOBE_SCHEME, 1, GF_TRUE, tci->metadata, tci->metadata_len);
+ if (e) goto exit;
+
+ count = gf_isom_get_sample_count(mp4, track);
+ has_crypted_samp = GF_FALSE;
+ /* a track with no sync-sample table is considered all-RAP */
+ if (! gf_isom_has_sync_points(mp4, track))
+ all_rap = GF_TRUE;
+
+ gf_isom_set_nalu_extract_mode(mp4, track, GF_ISOM_NALU_EXTRACT_INSPECT);
+ for (i = 0; i < count; i++) {
+ Bool is_encrypted_au = GF_TRUE;
+ samp = gf_isom_get_sample(mp4, track, i+1, &di);
+ if (!samp)
+ {
+ e = GF_IO_ERR;
+ goto exit;
+ }
+
+ /* take ownership of the payload so the sample can be rewritten below */
+ len = samp->dataLength;
+ buf = (char *) gf_malloc(len*sizeof(char));
+ memmove(buf, samp->data, len);
+ gf_free(samp->data);
+ samp->dataLength = 0;
+
+ /* selective encryption: decide whether this AU is encrypted at all */
+ switch (tci->sel_enc_type) {
+ case GF_CRYPT_SELENC_RAP:
+ if (!samp->IsRAP && !all_rap) {
+ is_encrypted_au = GF_FALSE;
+ }
+ break;
+ case GF_CRYPT_SELENC_NON_RAP:
+ if (samp->IsRAP || all_rap) {
+ is_encrypted_au = GF_FALSE;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (is_encrypted_au) {
+ u32 padding_bytes;
+ if (!has_crypted_samp) {
+ /* first encrypted sample: start the CBC chain from tci->first_IV */
+ memset(IV, 0, sizeof(char)*16);
+ memcpy(IV, tci->first_IV, sizeof(char)*16);
+ e = gf_crypt_init(mc, tci->key, 16, IV);
+ if (e) {
+ GF_LOG(GF_LOG_ERROR, GF_LOG_AUTHOR, ("[ADOBE] Cannot initialize AES-128 CBC (%s)\n", gf_error_to_string(e)) );
+ gf_crypt_close(mc);
+ mc = NULL;
+ e = GF_IO_ERR;
+ goto exit;
+ }
+ has_crypted_samp = GF_TRUE;
+ }
+ else {
+ /* subsequent samples: IV is the running CBC state of the cipher */
+ IV_size = 16;
+ e = gf_crypt_get_state(mc, IV, &IV_size);
+ }
+
+ /* pad to the AES block size; each pad byte holds the pad length */
+ padding_bytes = 16 - len % 16;
+ len += padding_bytes;
+ buf = (char *)gf_realloc(buf, len);
+ memset(buf+len-padding_bytes, padding_bytes, padding_bytes);
+
+ gf_crypt_encrypt(mc, buf, len);
+ }
+
+ /*rewrite sample with AU header*/
+ bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
+ if (is_encrypted_au) {
+ gf_bs_write_u8(bs, 0x10);
+ gf_bs_write_data(bs, (char *) IV, 16);
+ }
+ else {
+ gf_bs_write_u8(bs, 0x0);
+ }
+ gf_bs_write_data(bs, buf, len);
+ gf_bs_get_content(bs, &samp->data, &samp->dataLength);
+ gf_bs_del(bs);
+ bs = NULL;
+ gf_isom_update_sample(mp4, track, i+1, samp, 1);
+ gf_isom_sample_del(&samp);
+ samp = NULL;
+ gf_free(buf);
+ buf = NULL;
+
+ gf_set_progress("Adobe's protection scheme Encrypt", i+1, count);
+ }
+
+exit:
+ /* single cleanup point for all error paths (goto-based cleanup) */
+ if (samp) gf_isom_sample_del(&samp);
+ if (mc) gf_crypt_close(mc);
+ if (buf) gf_free(buf);
+ if (bs) gf_bs_del(bs);
+ return e;
+}
+
+/* Decrypts one track protected with Adobe's scheme (AES-128 CBC).
+ * Each input sample starts with a 1-byte AU header: non-zero means encrypted
+ * and is followed by a 16-byte IV; zero means the AU is in clear. After
+ * decryption, the trailing padding (last byte = pad length) is stripped.
+ * The CBC state is re-seeded from each sample's stored IV. On success the
+ * protection info is removed from the track. A missing trackID is logged
+ * and silently skipped (returns GF_OK). */
+GF_Err gf_adobe_decrypt_track(GF_ISOFile *mp4, GF_TrackCryptInfo *tci, void (*progress)(void *cbk, u64 done, u64 total), void *cbk)
+{
+ GF_Err e;
+ u32 track, count, len, i, prev_sample_decrypted, si;
+ u8 encrypted_au;
+ GF_Crypt *mc;
+ GF_ISOSample *samp;
+ char IV[17];
+ char *ptr;
+ GF_BitStream *bs;
+
+ e = GF_OK;
+ mc = NULL;
+ samp = NULL;
+ bs = NULL;
+ prev_sample_decrypted = GF_FALSE;
+
+ track = gf_isom_get_track_by_id(mp4, tci->trackID);
+ if (!track) {
+ GF_LOG(GF_LOG_ERROR, GF_LOG_AUTHOR, ("[ADOBE] Cannot find TrackID %d in input file - skipping\n", tci->trackID));
+ return GF_OK;
+ }
+
+ mc = gf_crypt_open("AES-128", "CBC");
+ if (!mc) {
+ GF_LOG(GF_LOG_ERROR, GF_LOG_AUTHOR, ("[ADOBE] Cannot open AES-128 CBC\n"));
+ e = GF_IO_ERR;
+ goto exit;
+ }
+
+ /* Adobe's scheme uses a single key (no selective key) */
+ memcpy(tci->key, tci->keys[0], 16);
+
+ count = gf_isom_get_sample_count(mp4, track);
+ gf_isom_set_nalu_extract_mode(mp4, track, GF_ISOM_NALU_EXTRACT_INSPECT);
+ for (i = 0; i < count; i++) {
+ u32 trim_bytes = 0;
+ samp = gf_isom_get_sample(mp4, track, i+1, &si);
+ if (!samp)
+ {
+ e = GF_IO_ERR;
+ goto exit;
+ }
+
+ ptr = samp->data;
+ len = samp->dataLength;
+
+ /* first byte of the AU header tells whether this sample is encrypted */
+ encrypted_au = ptr[0];
+ if (encrypted_au) {
+ memmove(IV, ptr+1, 16);
+ if (!prev_sample_decrypted) {
+ /* first encrypted sample: initialize the cipher with key + stored IV */
+ e = gf_crypt_init(mc, tci->key, 16, IV);
+ if (e) {
+ GF_LOG(GF_LOG_ERROR, GF_LOG_AUTHOR, ("[ADOBE] Cannot initialize AES-128 CBC (%s)\n", gf_error_to_string(e)) );
+ gf_crypt_close(mc);
+ mc = NULL;
+ e = GF_IO_ERR;
+ goto exit;
+ }
+ prev_sample_decrypted = GF_TRUE;
+ }
+ else {
+ /* later samples: reset the CBC state to this sample's IV */
+ e = gf_crypt_set_state(mc, IV, 16);
+ if (e) {
+ GF_LOG(GF_LOG_ERROR, GF_LOG_AUTHOR, ("[ADOBE] Cannot set state AES-128 CBC (%s)\n", gf_error_to_string(e)) );
+ gf_crypt_close(mc);
+ mc = NULL;
+ e = GF_IO_ERR;
+ goto exit;
+ }
+ }
+
+ /* skip the 1-byte header + 16-byte IV */
+ ptr += 17;
+ len -= 17;
+
+ gf_crypt_decrypt(mc, ptr, len);
+ /* last decrypted byte holds the number of padding bytes to strip */
+ trim_bytes = ptr[len-1];
+ }
+ else {
+ /* clear AU: only skip the 1-byte header */
+ ptr += 1;
+ len -= 1;
+ }
+
+ //rewrite decrypted sample
+ bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
+ gf_bs_write_data(bs, ptr, len - trim_bytes);
+ gf_free(samp->data);
+ samp->dataLength = 0;
+ gf_bs_get_content(bs, &samp->data, &samp->dataLength);
+ gf_isom_update_sample(mp4, track, i+1, samp, 1);
+ gf_bs_del(bs);
+ bs = NULL;
+ gf_isom_sample_del(&samp);
+ samp = NULL;
+ gf_set_progress("Adobe's protection scheme Decrypt", i+1, count);
+ }
+
+ /*remove protection info*/
+ e = gf_isom_remove_track_protection(mp4, track, 1);
+ if (e) {
+ GF_LOG(GF_LOG_ERROR, GF_LOG_AUTHOR, ("[ADOBE] Error Adobe's protection scheme signature from trackID %d: %s\n", tci->trackID, gf_error_to_string(e)));
+ }
+
+exit:
+ /* single cleanup point for all error paths */
+ if (mc) gf_crypt_close(mc);
+ if (samp) gf_isom_sample_del(&samp);
+ if (bs) gf_bs_del(bs);
+ return e;
+}
+
GF_EXPORT
GF_Err gf_decrypt_file(GF_ISOFile *mp4, const char *drm_file)
tci.enc_type = 3;
gf_decrypt_track = gf_cenc_decrypt_track;
break;
+ case 4:
+ gf_decrypt_track = gf_adobe_decrypt_track;
+ break;
default:
GF_LOG(GF_LOG_ERROR, GF_LOG_AUTHOR, ("[CENC/ISMA] Encryption type not supported\n"));
return GF_NOT_SUPPORTED;;
}
KMS_URI = "OMA DRM";
is_oma = 1;
- } else if (!gf_isom_is_cenc_media(mp4, i+1, 1)){
+ } else if (!gf_isom_is_cenc_media(mp4, i+1, 1) && !gf_isom_is_adobe_protection_media(mp4, i+1, 1)){
GF_LOG(GF_LOG_WARNING, GF_LOG_AUTHOR, ("[CENC/ISMA] TrackID %d encrypted with unknown scheme %s - skipping\n", trackID, gf_4cc_to_str(scheme_type) ));
continue;
}
tci->enc_type = 3;
gf_encrypt_track = gf_cenc_encrypt_track;
break;
+ case 4:
+ gf_encrypt_track = gf_adobe_encrypt_track;
+ break;
default:
GF_LOG(GF_LOG_ERROR, GF_LOG_AUTHOR, ("[CENC/ISMA] Encryption type not sopported\n"));
return GF_NOT_SUPPORTED;;
}
return gf_isom_set_track_layout_info(file, track, tk_w<<16, tk_h<<16, 0, 0, 0);
}
+
+/* Removes every non-RAP (non sync) sample from the track, keeping only random
+ * access points, and extends the last remaining sample so the media duration
+ * is preserved. A track with no sync-sample table is all-RAP: nothing to do. */
+GF_EXPORT
+GF_Err gf_media_remove_non_rap(GF_ISOFile *file, u32 track)
+{
+ GF_Err e;
+ u32 i, count, di;
+ u64 offset, dur, last_dts;
+ Bool all_raps = (gf_isom_has_sync_points(file, track)==0) ? 1 : 0;
+ if (all_raps) return GF_OK;
+
+ last_dts = 0;
+ dur = gf_isom_get_media_duration(file, track);
+
+ /* pack CTS offsets while editing so sample removal keeps timing consistent */
+ gf_isom_set_cts_packing(file, track, 1);
+
+ count = gf_isom_get_sample_count(file, track);
+ for (i=0; i<count; i++) {
+ GF_ISOSample *samp = gf_isom_get_sample_info(file, track, i+1, &di, &offset);
+ if (!samp) return gf_isom_last_error(file);
+
+ if (samp->IsRAP) {
+ /* keep RAPs; remember the DTS of the last one kept */
+ last_dts = samp->DTS;
+ gf_isom_sample_del(&samp);
+ continue;
+ }
+ gf_isom_sample_del(&samp);
+ e = gf_isom_remove_sample(file, track, i+1);
+ if (e) return e;
+ /* removal shifts the following samples down: re-test the same index */
+ i--;
+ count--;
+ }
+ gf_isom_set_cts_packing(file, track, 0);
+ /* stretch the last RAP to cover the original media duration */
+ gf_isom_set_last_sample_duration(file, track, (u32) (dur - last_dts) );
+ return GF_OK;
+}
+
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_EXPORT
GF_SAFEALLOC(ar, GF_HEVCParamArray);
ar->nalus = gf_list_new();
ar->type = type;
- gf_list_add(hevc_cfg->param_array, ar);
+ if (ar->type == GF_HEVC_NALU_VID_PARAM)
+ gf_list_insert(hevc_cfg->param_array, ar, 0);
+ else
+ gf_list_add(hevc_cfg->param_array, ar);
return ar;
}
{
SHVCTrackInfo sti[64];
GF_HEVCConfig *hevccfg, *shvccfg;
- u32 i, count, cur_extract_mode, j, k;
+ u32 i, count, cur_extract_mode, j, k, max_layer_id;
char *nal_data=NULL;
u32 nal_alloc_size;
GF_Err e = GF_OK;
memset(sti, 0, sizeof(sti));
sti[0].track_num = track;
-
+ max_layer_id = 0;
//split all SPS/PPS/VPS from svccfg
count = gf_list_count(shvccfg->param_array);
for (i=0; i<count; i++) {
if (!splitAll) layer_id = 1;
+ if (max_layer_id < layer_id)
+ max_layer_id = layer_id;
+
if (!sti[layer_id].shvccfg) {
+ GF_List *backup_list;
sti[layer_id].shvccfg = gf_odf_hevc_cfg_new();
+ backup_list = sti[layer_id].shvccfg->param_array;
+ memcpy(sti[layer_id].shvccfg , shvccfg ? shvccfg : hevccfg, sizeof(GF_HEVCConfig));
+ sti[layer_id].shvccfg->param_array = backup_list;
+
sti[layer_id].shvccfg->is_shvc = 1;
sti[layer_id].shvccfg->complete_representation = 1;
sti[layer_id].shvccfg->num_layers = 1;
count2--;
}
}
- //remove shvc config
+
+ //CLARIFY whether this is correct: we duplicate all VPS in the enhancement layer ...
+ //we do this because if we split the tracks some info for setting up the enhancement layer
+ //is in the VPS
+ count = gf_list_count(hevccfg->param_array);
+ for (i=0; i<count; i++) {
+ u32 k, count2;
+ GF_HEVCParamArray *s_ar;
+ GF_HEVCParamArray *ar = gf_list_get(hevccfg->param_array, i);
+ if (ar->type != GF_HEVC_NALU_VID_PARAM) continue;
+ count2 = gf_list_count(ar->nalus);
+ for (j=0; j<count2; j++) {
+ GF_AVCConfigSlot *sl = gf_list_get(ar->nalus, j);
+ u8 layer_id = ((sl->data[0] & 0x1) << 5) | (sl->data[1] >> 3);
+ if (layer_id) continue;
+
+ for (k=0; k <= max_layer_id; k++) {
+ GF_AVCConfigSlot *sl2;
+ if (!sti[k].shvccfg) continue;
+
+ s_ar = alloc_hevc_param_array(sti[k].shvccfg, ar->type);
+ s_ar->array_completeness = ar->array_completeness;
+
+ GF_SAFEALLOC(sl2, GF_AVCConfigSlot);
+ sl2->data = gf_malloc(sl->size);
+ memcpy(sl2->data, sl->data, sl->size);
+ sl2->id = sl->id;
+ sl2->size = sl->size;
+ gf_list_add(s_ar->nalus, sl2);
+ }
+ }
+ }
+
+ //update shvc config
e = gf_isom_shvc_config_update(file, track, 1, NULL, 0);
if (e) goto exit;
if (e) goto exit;
gf_isom_set_track_reference(file, sti[j].track_num, GF_4CC('s','b','a','s'), track_id);
+
+ gf_isom_set_nalu_extract_mode(file, sti[j].track_num, GF_ISOM_NALU_EXTRACT_INSPECT);
+
//get lower layer
for (k=j; k>0; k--) {
if (sti[k-1].track_num) {
exit:
//reset all scalable info
- for (j=0; j<64; j++) {
+ for (j=0; j<=max_layer_id; j++) {
if (sti[j].shvccfg) gf_odf_hevc_cfg_del(sti[j].shvccfg);
}
gf_isom_set_nalu_extract_mode(file, track, cur_extract_mode);
GF_BitStream *bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
GF_ISOSample *sample = gf_isom_get_sample(file, track, i+1, &di);
-
- data = sample->data;
+ data = (u8 *) sample->data;
size = sample->dataLength;
sample->data = NULL;
sample->dataLength = 0;
if (e)
goto err_exit;
}
- sample->data = data;
+ sample->data = (char *) data;
cur_tile = 0;
while (size) {
case GF_HEVC_NALU_SLICE_RADL_R:
case GF_HEVC_NALU_SLICE_RASL_R:
//ret = hevc_parse_slice_segment(bs, hevc, &n_state);
- e = gf_isom_append_sample_data(file, tiles_track[cur_tile], data, nalu_size + nalu_size_length);
+ e = gf_isom_append_sample_data(file, tiles_track[cur_tile], (char *) data, nalu_size + nalu_size_length);
if (e)
goto err_exit;
cur_tile++;
break;
default:
- gf_bs_write_data(bs, data, nalu_size + nalu_size_length);
+ gf_bs_write_data(bs, (char *) data, nalu_size + nalu_size_length);
break;
}
data += nalu_size + nalu_size_length;
if (e) goto err_exit;
/*copy subsample information*/
- e = gf_isom_fragment_copy_subsample(output, tf->TrackID, input, tf->OriginalTrack, tf->SampleNum + 1);
+ e = gf_isom_fragment_copy_subsample(output, tf->TrackID, input, tf->OriginalTrack, tf->SampleNum + 1, GF_FALSE);
if (e)
goto err_exit;
if (! hevc.pps[pps_idx].tiles_enabled_flag) return GF_OK;
nb_tracks = hevc.pps[pps_idx].num_tile_columns * hevc.pps[pps_idx].num_tile_rows;
- tiles_track = malloc(sizeof(u32) * nb_tracks);
+ tiles_track = gf_malloc(sizeof(u32) * nb_tracks);
for (i=0; i<nb_tracks; i++) {
gf_isom_clone_track(file, track, file, 0, &tiles_track[i] );
}
GF_ISOSample *sample = gf_isom_get_sample(file, track, i+1, &di);
- data = sample->data;
+ data = (u8 *) sample->data;
size = sample->dataLength;
sample->data = NULL;
sample->dataLength = 0;
case GF_HEVC_NALU_SLICE_RADL_R:
case GF_HEVC_NALU_SLICE_RASL_R:
//ret = hevc_parse_slice_segment(bs, hevc, &n_state);
- gf_isom_append_sample_data(file, track, data, nalu_size + nalu_size_length);
+ gf_isom_append_sample_data(file, track, (char *) data, nalu_size + nalu_size_length);
break;
default:
- gf_bs_write_data(bs, data, nalu_size + nalu_size_length);
+ gf_bs_write_data(bs, (char *) data, nalu_size + nalu_size_length);
break;
}
data += nalu_size + nalu_size_length;
if (a->sec==b->sec) return (a->nanosec<b->nanosec) ? 1 : 0;
return 1;
}
+/* Returns GF_TRUE when the two mux times are exactly equal (both the sec
+ * and nanosec components match). */
+static GFINLINE Bool gf_m2ts_time_equal(GF_M2TS_Time *a, GF_M2TS_Time *b) {
+ return ((a->sec==b->sec) && (a->nanosec == b->nanosec) );
+}
static GFINLINE Bool gf_m2ts_time_less_or_equal(GF_M2TS_Time *a, GF_M2TS_Time *b) {
if (a->sec>b->sec) return 0;
if (a->sec==b->sec) return (a->nanosec>b->nanosec) ? 0 : 1;
case GF_M2TS_AUDIO_AC3:
gf_bs_write_int(bs, GF_M2TS_REGISTRATION_DESCRIPTOR, 8);
gf_bs_write_int(bs, 4, 8);
- gf_bs_write_int(bs, 0x41, 8);
- gf_bs_write_int(bs, 0x43, 8);
- gf_bs_write_int(bs, 0x2D, 8);
- gf_bs_write_int(bs, 0x33, 8);
+ gf_bs_write_int(bs, 'A', 8);
+ gf_bs_write_int(bs, 'C', 8);
+ gf_bs_write_int(bs, '-', 8);
+ gf_bs_write_int(bs, '3', 8);
break;
case GF_M2TS_VIDEO_VC1:
gf_bs_write_int(bs, GF_M2TS_REGISTRATION_DESCRIPTOR, 8);
gf_bs_write_int(bs, 4, 8);
- gf_bs_write_int(bs, 0x56, 8);
- gf_bs_write_int(bs, 0x43, 8);
- gf_bs_write_int(bs, 0x2D, 8);
- gf_bs_write_int(bs, 0x31, 8);
+ gf_bs_write_int(bs, 'V', 8);
+ gf_bs_write_int(bs, 'C', 8);
+ gf_bs_write_int(bs, '-', 8);
+ gf_bs_write_int(bs, '1', 8);
break;
case GF_M2TS_AUDIO_EC3:
gf_bs_write_int(bs, GF_M2TS_DVB_EAC3_DESCRIPTOR, 8);
return 0;
}
-static void gf_m2ts_remap_timestamps_for_pes(GF_M2TS_Mux_Stream *stream, u32 pck_flags, u64 *dts, u64 *cts)
+static void gf_m2ts_remap_timestamps_for_pes(GF_M2TS_Mux_Stream *stream, u32 pck_flags, u64 *dts, u64 *cts, u32 *duration)
{
u64 pcr_offset;
if (stream->ts_scale) {
*cts = (u64) (stream->ts_scale * (s64) *cts);
*dts = (u64) (stream->ts_scale * (s64) *dts);
+ if (duration) *duration = (u32) (stream->ts_scale * (u32) *duration);
+
}
if (!stream->program->initial_ts_set) {
u32 nb_bits = (u32) (stream->program->mux->tot_pck_sent - stream->program->num_pck_at_pcr_init) * 1504;
else if (*dts < stream->last_dts) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS Muxer] PID %d: DTS "LLD" is less than last sent DTS "LLD"\n", stream->pid, *dts, stream->last_dts));
stream->last_dts = *dts;
+ } else {
+ stream->last_dts = *dts;
}
/*offset our timestamps*/
*dts = *dts - stream->program->initial_ts + pcr_offset;
}
+/* Wraps the text payload *input (of *len bytes) into an ID3v2.4 tag carrying a
+   single TXXX (user-defined text) frame, replacing the caller's buffer in place:
+   the original *input is freed and *input / *len are rewritten to point at the
+   newly serialized tag. Used to emit ID3/HLS metadata PES payloads.
+   NOTE(review): only the flag bits are written after the version bytes (3 unset
+   flags + 0x1F in 5 bits); the 4-byte synchsafe tag-size field of the ID3v2
+   header appears to be absent, and the frame size is written as a raw u32 where
+   ID3v2.4 mandates a synchsafe integer — confirm against consumers. */
+static void id3_tag_create(char **input, u32 *len)
+{
+ GF_BitStream *bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
+ /* ID3v2 header: file identifier "ID3", version 4.0 */
+ gf_bs_write_u8(bs, 'I');
+ gf_bs_write_u8(bs, 'D');
+ gf_bs_write_u8(bs, '3');
+ gf_bs_write_u8(bs, 4);
+ gf_bs_write_u8(bs, 0);
+ /* header flags: unsynchronisation / extended header / experimental all 0 */
+ gf_bs_write_int(bs, 0, 1);
+ gf_bs_write_int(bs, 0, 1);
+ gf_bs_write_int(bs, 0, 1);
+ gf_bs_write_int(bs, 0x1F, 5);
+ /* single TXXX frame holding the text payload */
+ gf_bs_write_u32(bs, GF_4CC('T','X','X','X'));
+ gf_bs_write_u32(bs, *len); /* size of the text */
+ /* two frame flag bytes, all cleared */
+ gf_bs_write_u8(bs, 0);
+ gf_bs_write_u8(bs, 0);
+ gf_bs_write_data(bs, *input, *len);
+ /* swap the caller's buffer for the serialized tag */
+ gf_free(*input);
+ gf_bs_get_content(bs, input, len);
+ gf_bs_del(bs);
+}
+
u32 gf_m2ts_stream_process_stream(GF_M2TS_Mux *muxer, GF_M2TS_Mux_Stream *stream)
{
Bool ret = 0;
stream->curr_pck.data = curr_pck->data;
stream->curr_pck.data_len = curr_pck->data_len;
stream->curr_pck.dts = curr_pck->dts;
+ stream->curr_pck.duration = curr_pck->duration;
stream->curr_pck.flags = curr_pck->flags;
stream->curr_pck.mpeg2_af_descriptors = curr_pck->mpeg2_af_descriptors;
stream->curr_pck.mpeg2_af_descriptors_size = curr_pck->mpeg2_af_descriptors_size;
stream->curr_pck.dts = stream->curr_pck.cts;
/*initializing the PCR*/
- if (!stream->program->pcr_init_time) {
+ if (!stream->program->pcr_init_time_set) {
if (stream==stream->program->pcr) {
if (stream->program->mux->init_pcr_value) {
- stream->program->pcr_init_time = stream->program->mux->init_pcr_value;
+ stream->program->pcr_init_time = stream->program->mux->init_pcr_value-1;
} else {
while (!stream->program->pcr_init_time)
stream->program->pcr_init_time = gf_rand();
}
-
+ stream->program->pcr_init_time_set = 1;
stream->program->ts_time_at_pcr_init = muxer->time;
stream->program->num_pck_at_pcr_init = muxer->tot_pck_sent;
/*since we reallocated the packet data buffer, force a discard in pull mode*/
stream->discard_data = 1;
break;
+ case GF_M2TS_METADATA_PES:
+ case GF_M2TS_METADATA_ID3_HLS:
+ {
+ id3_tag_create(&stream->curr_pck.data, &stream->curr_pck.data_len);
+ stream->discard_data = 1;
+ }
+ break;
}
if (stream->start_pes_at_rap && (stream->curr_pck.flags & GF_ESI_DATA_AU_RAP)
}
/*rewrite timestamps for PES header*/
- gf_m2ts_remap_timestamps_for_pes(stream, stream->curr_pck.flags, &stream->curr_pck.dts, &stream->curr_pck.cts);
+ gf_m2ts_remap_timestamps_for_pes(stream, stream->curr_pck.flags, &stream->curr_pck.dts, &stream->curr_pck.cts, &stream->curr_pck.duration);
/*compute next interesting time in TS unit: this will be DTS of next packet*/
stream->bytes_since_last_time = 0;
stream->pes_since_last_time = 0;
} else {
- u32 time_diff = (u32) (stream->curr_pck.dts - stream->last_br_time - 1 );
+ u32 time_diff = (u32) (stream->curr_pck.dts + 1 - stream->last_br_time );
if ((stream->pes_since_last_time > 4) && (time_diff >= BITRATE_UPDATE_WINDOW)) {
u32 bitrate;
u64 r = 8*stream->bytes_since_last_time;
stream->program->mux->needs_reconfig = 1;
}
}
+
+ /* in live with no fixed target rate, we have to always compute the bitrate in case we have a peak (IDR), otherwise the mux time will increase too fast and we will send packets way too fast
+ this is not perfect, we may end up with a too high stream rate and the mux time will increase too slowly, hence packet will be late*/
+ if (stream->program->mux->real_time && !stream->program->mux->fixed_rate && stream->curr_pck.duration) {
+ u64 inst_rate;
+ inst_rate = 8*stream->curr_pck.data_len;
+ inst_rate *= 90000;
+ inst_rate /= stream->curr_pck.duration;
+ inst_rate /= 8;
+ if (inst_rate>stream->bit_rate)
+ {
+ stream->bit_rate = (u32) inst_rate;
+ stream->program->mux->needs_reconfig = 1;
+ }
+ }
+
stream->pes_since_last_time ++;
return stream->scheduling_priority + stream->pcr_priority;
}
if (stream->next_payload_size) {
stream->next_payload_size += stream->reframe_overhead;
- gf_m2ts_remap_timestamps_for_pes(stream, stream->next_pck_flags, &stream->next_pck_dts, &stream->next_pck_cts);
+ gf_m2ts_remap_timestamps_for_pes(stream, stream->next_pck_flags, &stream->next_pck_dts, &stream->next_pck_cts, NULL);
}
}
return hdr_len;
}
-u32 gf_m2ts_stream_add_pes_header(GF_BitStream *bs, GF_M2TS_Mux_Stream *stream, u32 payload_length)
+u32 gf_m2ts_stream_add_pes_header(GF_BitStream *bs, GF_M2TS_Mux_Stream *stream)
{
u64 t, dts, cts;
u32 pes_len;
gf_bs_write_long_int(bs, t, 15);
gf_bs_write_int(bs, 1, 1); // marker bit
}
- GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS Muxer] PID %d: Adding PES header at PCR "LLD" - has PTS %d (%d) - has DTS %d (%d)\n", stream->pid, gf_m2ts_get_pcr(stream->program)/300, use_pts, cts, use_dts, dts));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS Muxer] PID %d: Adding PES header at PCR "LLD" - has PTS %d ("LLU") - has DTS %d ("LLU") - Payload length %d\n", stream->pid, gf_m2ts_get_pcr(stream->program)/300, use_pts, cts, use_dts, dts, pes_len));
return pes_len+4; // 4 = start code + stream_id
}
adaptation_field_control = GF_M2TS_ADAPTATION_AND_PAYLOAD;
}
if (stream->curr_pck.mpeg2_af_descriptors) {
- if (adaptation_field_control != GF_M2TS_ADAPTATION_AND_PAYLOAD) {
+ if (adaptation_field_control == GF_M2TS_ADAPTATION_NONE) {
payload_length -= 2; //AF header but no PCR
adaptation_field_control = GF_M2TS_ADAPTATION_AND_PAYLOAD;
}
else {
/*AF headers*/
if (!needs_pcr) {
- payload_length -= 2;
- adaptation_field_control = GF_M2TS_ADAPTATION_AND_PAYLOAD;
+ if (adaptation_field_control == GF_M2TS_ADAPTATION_NONE) {
+ payload_length -= 2;
+ adaptation_field_control = GF_M2TS_ADAPTATION_AND_PAYLOAD;
+ }
}
/*cannot add adaptation field for this TS packet with this payload, we need to split in 2 TS packets*/
if (payload_length < payload_to_copy + copy_next) {
stream->program->mux->tot_pes_pad_bytes += padding_length;
}
- if (hdr_len) gf_m2ts_stream_add_pes_header(bs, stream, payload_length);
+ if (hdr_len) gf_m2ts_stream_add_pes_header(bs, stream);
pos = (u32) gf_bs_get_position(bs);
gf_bs_del(bs);
// gf_m2ts_time_inc(&stream->time, payload_to_copy + pos - 4, stream->bit_rate);
if (stream->pck_offset == stream->curr_pck.data_len) {
+ if (stream->program->mux->real_time && !stream->program->mux->fixed_rate && gf_m2ts_time_less(&stream->time, &stream->program->mux->time) ) {
+ GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG2-TS Muxer] Done sending PES from PID %d TOO LATE: stream time %d:%d - mux time %d:%d (current mux rate %d) \n", stream->pid, stream->time.sec, stream->time.nanosec, stream->program->mux->time.sec, stream->program->mux->time.nanosec, stream->program->mux->bit_rate));
+ } else {
+ GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG2-TS Muxer] Done sending PES (%d bytes) from PID %d at stream time %d:%d (DTS "LLD" - PCR "LLD")\n", stream->curr_pck.data_len, stream->pid, stream->time.sec, stream->time.nanosec, stream->curr_pck.dts, gf_m2ts_get_pcr(stream->program)/300));
+ }
+
/*PES has been sent, discard internal buffer*/
if (stream->discard_data) gf_free(stream->curr_pck.data);
stream->curr_pck.data = NULL;
stream->curr_pck.data_len = 0;
stream->pck_offset = 0;
- GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG2-TS Muxer] Done sending PES (%d bytes) from PID %d at stream time %d:%d (DTS "LLD" - PCR "LLD")\n", stream->curr_pck.data_len, stream->pid, stream->time.sec, stream->time.nanosec, stream->curr_pck.dts, gf_m2ts_get_pcr(stream->program)/300));
-
#ifndef GPAC_DISABLE_LOG
if (gf_log_tool_level_on(GF_LOG_CONTAINER, GF_LOG_INFO)
&& gf_m2ts_time_less(&stream->program->mux->time, &stream->time)
GF_SAFEALLOC(stream->pck_reassembler, GF_M2TS_Packet);
stream->pck_reassembler->cts = esi_pck->cts;
stream->pck_reassembler->dts = esi_pck->dts;
+ stream->pck_reassembler->duration = esi_pck->duration;
if (esi_pck->mpeg2_af_descriptors) {
stream->pck_reassembler->mpeg2_af_descriptors = gf_realloc(stream->pck_reassembler->mpeg2_af_descriptors, sizeof(u8)* (stream->pck_reassembler->mpeg2_af_descriptors_size + esi_pck->mpeg2_af_descriptors_size) );
memcpy(stream->pck_reassembler->mpeg2_af_descriptors + stream->pck_reassembler->mpeg2_af_descriptors_size, esi_pck->mpeg2_af_descriptors, sizeof(u8)* esi_pck->mpeg2_af_descriptors_size );
gf_list_add(stream->loop_descriptors, desc);
}
+/* Builds an MPEG-2 TS metadata_pointer_descriptor announcing an ID3 metadata
+   service and appends it to the program's PMT descriptor loop. The descriptor
+   advertises metadata_application_format 0xFFFF with identifier 'ID3 ', a
+   matching metadata_format, and points at this program via program_number. */
+static void gf_m2ts_stream_add_metadata_pointer_descriptor(GF_M2TS_Mux_Program *program)
+{
+ GF_M2TSDescriptor *desc;
+ GF_BitStream *bs;
+ u32 data_len;
+ bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
+ /* metadata_application_format = 0xFFFF => identifier follows */
+ gf_bs_write_u16(bs, 0xFFFF);
+ gf_bs_write_u32(bs, GF_4CC('I','D','3',' '));
+ /* metadata_format = 0xFF => format identifier follows */
+ gf_bs_write_u8(bs, 0xFF);
+ gf_bs_write_u32(bs, GF_4CC('I','D','3',' '));
+ gf_bs_write_u8(bs, 0); /* service id */
+ gf_bs_write_int(bs, 0, 1); /* locator */
+ gf_bs_write_int(bs, 0, 2); /* carriage flags */
+ gf_bs_write_int(bs, 0x1F, 5); /* reserved */
+ gf_bs_write_u16(bs, program->number);
+ GF_SAFEALLOC(desc, GF_M2TSDescriptor);
+ desc->tag = (u8) GF_M2TS_METADATA_POINTER_DESCRIPTOR;
+ gf_bs_get_content(bs, &desc->data, &data_len);
+ gf_bs_del(bs);
+ /* descriptor payloads are <= 255 bytes, so the u8 narrowing is safe here */
+ desc->data_len = (u8) data_len;
+ gf_list_add(program->loop_descriptors, desc);
+}
+
+/* Builds an MPEG-2 TS metadata_descriptor for an ID3 metadata stream and
+   appends it to the stream's ES descriptor loop. Mirrors the program-level
+   metadata_pointer_descriptor: application format 0xFFFF + 'ID3 ' identifier,
+   metadata_format 0xFF + 'ID3 ', no decoder config and no DSM-CC carriage. */
+static void gf_m2ts_stream_add_metadata_descriptor(GF_M2TS_Mux_Stream *stream)
+{
+ GF_M2TSDescriptor *desc;
+ GF_BitStream *bs;
+ u32 data_len;
+ bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
+ /* metadata_application_format = 0xFFFF => identifier follows */
+ gf_bs_write_u16(bs, 0xFFFF);
+ gf_bs_write_u32(bs, GF_4CC('I','D','3',' '));
+ /* metadata_format = 0xFF => format identifier follows */
+ gf_bs_write_u8(bs, 0xFF);
+ gf_bs_write_u32(bs, GF_4CC('I','D','3',' '));
+ gf_bs_write_u8(bs, 0); /* service id */
+ gf_bs_write_int(bs, 0, 3); /* decoder config flags */
+ gf_bs_write_int(bs, 0, 1); /* dsmcc flag */
+ gf_bs_write_int(bs, 0xF, 4); /* reserved */
+ GF_SAFEALLOC(desc, GF_M2TSDescriptor);
+ desc->tag = (u8) GF_M2TS_METADATA_DESCRIPTOR;
+ gf_bs_get_content(bs, &desc->data, &data_len);
+ gf_bs_del(bs);
+ /* descriptor payloads are <= 255 bytes, so the u8 narrowing is safe here */
+ desc->data_len = (u8) data_len;
+ gf_list_add(stream->loop_descriptors, desc);
+}
+
GF_EXPORT
GF_M2TS_Mux_Stream *gf_m2ts_program_stream_add(GF_M2TS_Mux_Program *program, struct __elementary_stream_ifce *ifce, u32 pid, Bool is_pcr, Bool force_pes)
{
stream->mpeg2_stream_type = GF_M2TS_SYSTEMS_MPEG4_SECTIONS;
}
break;
+ case GF_STREAM_TEXT:
+ stream->mpeg2_stream_id = 0xBD;
+ stream->mpeg2_stream_type = GF_M2TS_METADATA_PES;
+ gf_m2ts_stream_add_metadata_pointer_descriptor(stream->program);
+ gf_m2ts_stream_add_metadata_descriptor(stream);
}
-
if (! (ifce->caps & GF_ESI_STREAM_WITHOUT_MPEG4_SYSTEMS)) {
/*override signaling for all streams except BIFS/OD, to use MPEG-4 PES*/
if (program->mpeg4_signaling==GF_M2TS_MPEG4_SIGNALING_FULL) {
program->mux = muxer;
program->mpeg4_signaling = mpeg4_signaling;
program->pcr_offset = pcr_offset;
-
+ program->loop_descriptors = gf_list_new();
+
program->number = program_number;
if (muxer->programs) {
GF_M2TS_Mux_Program *p = muxer->programs;
GF_Err gf_m2ts_mux_set_initial_pcr(GF_M2TS_Mux *muxer, u64 init_pcr_value)
{
if (!muxer) return GF_BAD_PARAM;
- muxer->init_pcr_value = init_pcr_value;
+ muxer->init_pcr_value = 1 + init_pcr_value;
return GF_OK;
}
if (!muxer->init_sys_time) {
//init TS time
muxer->time.sec = muxer->time.nanosec = 0;
- gf_m2ts_time_inc(&muxer->time, (u32) muxer->init_pcr_value, 27000000);
+ gf_m2ts_time_inc(&muxer->time, (u32) (muxer->init_pcr_value ? muxer->init_pcr_value-1 : 0), 27000000);
muxer->init_sys_time = now;
muxer->init_ts_time = muxer->time;
} else {
if (!flush_all_pes && muxer->force_pat)
return gf_m2ts_mux_process(muxer, status, usec_till_next);
- if (res && gf_m2ts_time_less_or_equal(&stream->time, &time)) {
- /*if same priority schedule the earliest data*/
- if (res>=highest_priority) {
+ if (res) {
+ /*always schedule the earliest data*/
+ if (gf_m2ts_time_less(&stream->time, &time)) {
highest_priority = res;
time = stream->time;
stream_to_process = stream;
goto send_pck;
#endif
}
+ else if (gf_m2ts_time_equal(&stream->time, &time)) {
+ /*if the same priority schedule base stream first*/
+ if ((res > highest_priority) || ((res == highest_priority) && !stream->ifce->depends_on_stream)){
+ highest_priority = res;
+ time = stream->time;
+ stream_to_process = stream;
+#if FORCE_PCR_FIRST
+ goto send_pck;
+#endif
+ }
+ }
}
}
nb_streams++;
if (is_mj2k)
write_jp2_file(bs, samp->data, samp->dataLength, dsi, dsi_size);
else {
+#ifndef GPAC_DISABLE_TTXT
if (is_wvtt) {
GF_Err e;
e = gf_webvtt_dump_header(out, dumper->file, track, 1);
u32 timescale = gf_isom_get_media_timescale(dumper->file, track);
gf_webvtt_dump_iso_sample(out, timescale, samp);
}
- } else {
+ } else
+#endif
+ {
gf_bs_write_data(bs, samp->data, samp->dataLength);
}
}
if (is_mj2k)
write_jp2_file(bs, samp->data, samp->dataLength, dsi, dsi_size);
else {
+#ifndef GPAC_DISABLE_TTXT
if (is_wvtt) {
GF_Err e;
e = gf_webvtt_dump_header(out, dumper->file, track, 1);
u32 timescale = gf_isom_get_media_timescale(dumper->file, track);
gf_webvtt_dump_iso_sample(out, timescale, samp);
}
- } else {
+ } else
+#endif
+ {
gf_bs_write_data(bs, samp->data, samp->dataLength);
}
}
if (is_vobsub) return gf_dump_to_vobsub(dumper, szName, track, dsi, dsi_size);
if (is_webvtt) {
+#ifndef GPAC_DISABLE_TTXT
GF_Err gf_webvtt_dump_iso_track(GF_MediaExporter *dumper, char *szName, u32 track, Bool merge);
return gf_webvtt_dump_iso_track(dumper, szName, track, (dumper->flags & GF_EXPORT_WEBVTT_NOMERGE? GF_FALSE : GF_TRUE));
+#else
+ return GF_NOT_SUPPORTED;
+#endif
}
if (qcp_type>1) {
if (layer) fprintf(vtt, "layer:%d\n", layer);
}
if (esd->decoderConfig->decoderSpecificInfo && esd->decoderConfig->decoderSpecificInfo->data) {
+#ifndef GPAC_DISABLE_TTXT
if (isText) {
if (mstype == GF_ISOM_SUBTYPE_WVTT) {
/* Warning: Just use -raw export */
gf_webvtt_dump_header_boxed(med, esd->decoderConfig->decoderSpecificInfo->data+4, esd->decoderConfig->decoderSpecificInfo->dataLength, &headerLength);
fprintf(vtt, "text-header-length: %d\n", headerLength);
}
- } else {
+ } else
+#endif
+ {
char b64[200];
u32 size = gf_base64_encode(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, b64, 200);
useBase64 = GF_TRUE;
GF_ISOSample *samp = gf_isom_get_sample(dumper->file, track, i+1, &di);
if (!samp) break;
+#ifndef GPAC_DISABLE_TTXT
{
GF_WebVTTTimestamp start, end;
u64 dur = gf_isom_get_sample_duration(dumper->file, track, i+1);
else fprintf(vtt, "isRAP:false ");
fprintf(vtt, "\n");
}
+#endif
if (med) {
gf_fwrite(samp->data, samp->dataLength, 1, med);
} else if (dumper->flags & GF_EXPORT_WEBVTT_META_EMBEDDED) {
header_size = 0;
if (esd) {
if (esd->decoderConfig->decoderSpecificInfo && esd->decoderConfig->decoderSpecificInfo->data) {
+#ifndef GPAC_DISABLE_TTXT
if (mstype == GF_ISOM_SUBTYPE_WVTT || mstype == GF_ISOM_SUBTYPE_STSE) {
gf_webvtt_dump_header_boxed(media,
esd->decoderConfig->decoderSpecificInfo->data+4,
esd->decoderConfig->decoderSpecificInfo->dataLength,
&header_size);
- } else {
+ } else
+#endif
+ {
gf_fwrite(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, 1, media);
header_size = esd->decoderConfig->decoderSpecificInfo->dataLength;
}
strcat(szFile, ".264");
gf_export_message(dumper, GF_OK, "Extracting H264-SVC Visual stream to h264");
break;
+ case GF_M2TS_METADATA_ID3_HLS:
+ strcat(szFile, ".txt");
+ gf_export_message(dumper, GF_OK, "Extracting ID3 tags from metadata stream");
+ break;
default:
strcat(szFile, ".raw");
gf_export_message(dumper, GF_OK, "Extracting Unknown stream to raw");
samp = gf_isom_sample_new();
samp->IsRAP = 1;
samp->dataLength = nbbytes;
- samp->data = aac_buf;
+ samp->data = (char *) aac_buf;
e = gf_isom_add_sample(import->dest, track, di, samp);
if (e) goto exit;
e = gf_isom_avc_config_new(import->dest, track, avccfg, NULL, NULL, &di);
if (e) goto exit;
+ gf_isom_set_nalu_extract_mode(import->dest, track, GF_ISOM_NALU_EXTRACT_INSPECT);
+
sample_data = NULL;
sample_is_rap = 0;
sample_has_islice = 0;
}
return NULL;
}
+
+
+/* Derives the parallelismType field of an HEVC decoder configuration record
+   by parsing every PPS stored in the config:
+     1 = no tiles and no wavefront (slice-based decoding only)
+     2 = every PPS enables tiles (and none enables WPP)
+     3 = every PPS enables wavefront parallel processing (and none tiles)
+     0 = mixed or undetermined.
+   NOTE(review): the result of get_hevc_param_array() is dereferenced without a
+   NULL check — confirm callers guarantee at least one PPS array is present. */
+static void hevc_set_parall_type(GF_HEVCConfig *hevc_cfg)
+{
+ u32 use_tiles, use_wpp, nb_pps;
+ HEVCState hevc;
+ GF_HEVCParamArray *ar = get_hevc_param_array(hevc_cfg, GF_HEVC_NALU_PIC_PARAM);
+ u32 i, count = gf_list_count(ar->nalus);
+
+ memset(&hevc, 0, sizeof(HEVCState));
+ hevc.sps_active_idx = -1;
+
+ use_tiles = 0;
+ use_wpp = 0;
+ nb_pps = 0;
+
+
+ /* tally, over all parsable PPS, which parallelism tool each one enables */
+ for (i=0; i<count; i++) {
+ HEVC_PPS *pps;
+ GF_AVCConfigSlot *slc = gf_list_get(ar->nalus, i);
+ s32 idx = gf_media_hevc_read_pps(slc->data, slc->size, &hevc);
+
+ if (idx>=0) {
+ nb_pps++;
+ pps = &hevc.pps[idx];
+ if (!pps->entropy_coding_sync_enabled_flag && pps->tiles_enabled_flag)
+ use_tiles++;
+ else if (pps->entropy_coding_sync_enabled_flag && !pps->tiles_enabled_flag)
+ use_wpp++;
+ }
+ }
+ /* the type is only set when ALL PPS agree on the same tool; otherwise 0 */
+ if (!use_tiles && !use_wpp) hevc_cfg->parallelismType = 1;
+ else if (!use_wpp && (use_tiles==nb_pps) ) hevc_cfg->parallelismType = 2;
+ else if (!use_tiles && (use_wpp==nb_pps) ) hevc_cfg->parallelismType = 3;
+ else hevc_cfg->parallelismType = 0;
+}
+
#endif
static GF_Err gf_import_hevc(GF_MediaImporter *import)
Bool flush_sample, flush_next_sample, is_empty_sample, sample_is_rap, sample_has_islice, first_nal, slice_is_ref, has_cts_offset, is_paff, set_subsamples, slice_force_ref;
u32 ref_frame, timescale, copy_size, size_length, dts_inc;
s32 last_poc, max_last_poc, max_last_b_poc, poc_diff, prev_last_poc, min_poc, poc_shift;
- Bool first_avc;
+ Bool first_hevc;
u32 use_opengop_gdr = 0;
u8 layer_ids[64];
shvc_cfg->non_hevc_base_layer = 0;
buffer = (char*)gf_malloc(sizeof(char) * max_size);
sample_data = NULL;
- first_avc = 1;
+ first_hevc = 1;
sei_recovery_frame_count = -1;
spss = ppss = vpss = NULL;
e = gf_isom_hevc_config_new(import->dest, track, hevc_cfg, NULL, NULL, &di);
if (e) goto exit;
+ gf_isom_set_nalu_extract_mode(import->dest, track, GF_ISOM_NALU_EXTRACT_INSPECT);
memset(layer_ids, 0, sizeof(u8)*64);
sample_data = NULL;
dst_cfg->avgFrameRate = hevc.vps[idx].rates[0].avg_pic_rate;
dst_cfg->constantFrameRate = hevc.vps[idx].rates[0].constand_pic_rate_idc;
- dst_cfg->numTemporalLayers = hevc.vps[idx].max_sub_layer;
+ dst_cfg->numTemporalLayers = hevc.vps[idx].max_sub_layers;
dst_cfg->temporalIdNested = hevc.vps[idx].temporal_id_nesting;
//TODO set scalability mask
dst_cfg->luma_bit_depth = hevc.sps[idx].bit_depth_luma;
dst_cfg->chroma_bit_depth = hevc.sps[idx].bit_depth_chroma;
- //need VUI for these ...
- //u16 min_spatial_segmentation_idc;
- //u8 parallelismType;
- //u16 avgFrameRate;
- //u8 constantFrameRate;
-
if (!spss) {
GF_SAFEALLOC(spss, GF_HEVCParamArray);
spss->nalus = gf_list_new();
goto restart_import;
}
- if (first_avc) {
- first_avc = 0;
+ if (first_hevc) {
+ first_hevc = 0;
gf_import_message(import, GF_OK, "HEVC import - frame size %d x %d at %02.3f FPS", hevc.sps[idx].width, hevc.sps[idx].height, FPS);
} else {
gf_import_message(import, GF_OK, "SHVC detected - %d x %d at %02.3f FPS", hevc.sps[idx].width, hevc.sps[idx].height, FPS);
}
if (gf_list_count(hevc_cfg->param_array) || !gf_list_count(shvc_cfg->param_array) ) {
+ hevc_set_parall_type(hevc_cfg);
gf_isom_hevc_config_update(import->dest, track, 1, hevc_cfg);
if (gf_list_count(shvc_cfg->param_array)) {
+ hevc_set_parall_type(shvc_cfg);
+
+ shvc_cfg->avgFrameRate = hevc_cfg->avgFrameRate;
+ shvc_cfg->constantFrameRate = hevc_cfg->constantFrameRate;
+ shvc_cfg->numTemporalLayers = hevc_cfg->numTemporalLayers;
+ shvc_cfg->temporalIdNested = hevc_cfg->temporalIdNested;
+
gf_isom_shvc_config_update(import->dest, track, 1, shvc_cfg, 1);
}
} else {
+ hevc_set_parall_type(shvc_cfg);
gf_isom_shvc_config_update(import->dest, track, 1, shvc_cfg, 0);
}
gf_import_message(import, GF_OK, "[MPEG-2 TS] PMT Update found - cannot import any further");
import->flags |= GF_IMPORT_DO_ABORT;
break;
+ case GF_M2TS_EVT_DURATION_ESTIMATED:
+ prog = (GF_M2TS_Program*)par;
+
+ if (import->flags & GF_IMPORT_PROBE_ONLY) {
+ import->probe_duration = ((GF_M2TS_PES_PCK *) par)->PTS;
+ //import->flags |= GF_IMPORT_DO_ABORT;
+ }
+ break;
/*case GF_M2TS_EVT_SDT_FOUND:
import->nb_progs = gf_list_count(ts->SDTs);
tsimp->nb_video++;
break;
case GF_M2TS_VIDEO_HEVC:
- import->tk_info[idx].media_type = GF_4CC('h','e','v','c');
+ case GF_M2TS_VIDEO_SHVC:
+ import->tk_info[idx].media_type = (es->stream_type==GF_M2TS_VIDEO_SHVC) ? GF_4CC('S','H','V','C') : GF_4CC('H','E','V','C');
import->tk_info[idx].type = GF_ISOM_MEDIA_VISUAL;
import->tk_info[idx].lang = pes->lang;
import->nb_tracks++;
}
import->nb_tracks++;
break;
+ case GF_M2TS_METADATA_ID3_HLS:
+ import->tk_info[idx].media_type = GF_4CC('I','D','3',' ');
+ import->tk_info[idx].type = GF_ISOM_MEDIA_META;
+ import->tk_info[idx].lang = pes->lang;
+ import->nb_tracks++;
+ break;
+ default:
+ gf_import_message(import, GF_OK, "[MPEG-2 TS] Ignoring stream of type %d", es->stream_type);
}
}
} else {
//ses = (GF_M2TS_SECTION_ES *)es;
} else {
pes = (GF_M2TS_PES *)es;
- gf_m2ts_set_pes_framing(pes, GF_M2TS_PES_FRAMING_DEFAULT);
+ gf_m2ts_set_pes_framing(pes, GF_M2TS_PES_FRAMING_DEFAULT_NAL);
}
mtype = stype = oti = 0;
if (!ts->has_4on2
&& (tsimp->nb_video_configured == tsimp->nb_video)
&& (tsimp->nb_audio_configured == tsimp->nb_audio)
+ && import->probe_duration
) {
import->flags |= GF_IMPORT_DO_ABORT;
}
tsimp->hevc.vps[idx].state = 2;
tsimp->hevccfg->avgFrameRate = tsimp->hevc.vps[idx].rates[0].avg_pic_rate;
tsimp->hevccfg->constantFrameRate = tsimp->hevc.vps[idx].rates[0].constand_pic_rate_idc;
- tsimp->hevccfg->numTemporalLayers = tsimp->hevc.vps[idx].max_sub_layer;
+ tsimp->hevccfg->numTemporalLayers = tsimp->hevc.vps[idx].max_sub_layers;
hevc_cfg_add_nalu(tsimp->hevccfg, nal_type, pck->data+4, pck->data_len-4);
}
return;
ts = gf_m2ts_demux_new();
ts->on_event = on_m2ts_import_data;
ts->user = &tsimp;
+ ts->file_size = fsize;
ts->dvb_h_demux = (import->flags & GF_IMPORT_MPE_DEMUX) ? 1 : 0;
break;
gf_m2ts_process_data(ts, data, size);
+ ts->nb_pck++;
if (import->flags & GF_IMPORT_DO_ABORT) break;
done += size;
if (do_import) gf_set_progress(progress, (u32) (done/1024), (u32) (fsize/1024));
if (tsimp.hevccfg) {
u32 w = ((GF_M2TS_PES*)es)->vid_w;
u32 h = ((GF_M2TS_PES*)es)->vid_h;
+ hevc_set_parall_type(tsimp.hevccfg);
gf_isom_hevc_config_update(import->dest, tsimp.track, 1, tsimp.hevccfg);
gf_isom_set_visual_info(import->dest, tsimp.track, 1, w, h);
gf_isom_set_track_layout_info(import->dest, tsimp.track, w<<16, h<<16, 0, 0, 0);
mpd->availabilityStartTime = gf_mpd_parse_date(att->value);
} else if (!strcmp(att->name, "availabilityEndTime")) {
mpd->availabilityEndTime = gf_mpd_parse_date(att->value);
+ } else if (!strcmp(att->name, "publishTime")) {
+ mpd->publishTime = gf_mpd_parse_date(att->value);
} else if (!strcmp(att->name, "mediaPresentationDuration")) {
mpd->media_presentation_duration = gf_mpd_parse_duration(att->value);
} else if (!strcmp(att->name, "minimumUpdatePeriod")) {
case GF_M2TS_SYSTEMS_MPEG4_SECTIONS: return "MPEG-4 SL (Section)";
case GF_M2TS_MPE_SECTIONS: return "MPE (Section)";
+ case GF_M2TS_METADATA_PES: return "Metadata (PES)";
+ case GF_M2TS_METADATA_ID3_HLS: return "ID3/HLS Metadata (PES)";
+
default: return "Unknown";
}
}
u64 file_size = 0;
// if (ts->duration>0) return;
- if (ts->file) {
+ if (ts->file || ts->file_size) {
file_size = ts->file_size;
} else if (ts->dnload) {
u32 size;
}
}
-static u32 gf_m2ts_reframe_default(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len)
+static u32 gf_m2ts_reframe_default(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *pes_hdr)
{
GF_M2TS_PES_PCK pck;
pck.flags = 0;
return 0;
}
-static u32 gf_m2ts_reframe_reset(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len)
+static u32 gf_m2ts_reframe_reset(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *pes_hdr)
{
- if (pes->data) {
- gf_free(pes->data);
- pes->data = NULL;
+ if (pes->pck_data) {
+ gf_free(pes->pck_data);
+ pes->pck_data = NULL;
}
- pes->data_len = 0;
+ pes->pck_data_len = pes->pck_alloc_len = 0;
if (pes->prev_data) {
gf_free(pes->prev_data);
pes->prev_data = NULL;
pes->prev_PTS = 0;
pes->reframe = NULL;
pes->cc = -1;
+ pes->temi_tc_desc_len = 0;
return 0;
}
-static u32 gf_m2ts_reframe_nalu_video(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, Bool is_hevc)
+static u32 gf_m2ts_reframe_nalu_video(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *pes_hdr, Bool is_hevc)
{
Bool au_start_in_pes=0;
Bool prev_is_au_delim=0;
Bool short_start_code = 0;
Bool esc_code_found = 0;
u32 nal_type, sc_pos = 0;
-
+ u32 first_nal_offset_in_pck = 0;
+ Bool full_au_pes_mode = 0;
+ u8 *au_start = NULL;
GF_M2TS_PES_PCK pck;
if (!same_pts)
}
data += sc_pos;
+ first_nal_offset_in_pck += sc_pos;
data_len -= sc_pos;
}
start_code_found = short_start_code ? 2 : 1;
}
}
#endif
- /*check AU start type*/
- if (nal_type==GF_HEVC_NALU_ACCESS_UNIT) {
+ /*check AU start type - if this is an SHVC PID and the first nal is the first byte of the PES payload, consider this is an AU start*/
+ if ((nal_type==GF_HEVC_NALU_ACCESS_UNIT) || (pes->depends_on_pid && !first_nal_offset_in_pck)) {
if (!prev_is_au_delim) {
+ //this was not a one AU per PES config, dispatch
+ if (au_start) {
+ pck.data = (char *)au_start;
+ pck.data_len = (u32) (data - au_start);
+ ts->on_event(ts, GF_M2TS_EVT_PES_PCK, &pck);
+ au_start = NULL;
+ full_au_pes_mode = 0;
+ }
+
if (au_start_in_pes) {
/*FIXME - we should check the AVC framerate to update the timing ...*/
pck.DTS += 3000;
pck.PTS += 3000;
// GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID%d: Two AVC AUs start in this PES packet - cannot recompute non-first AU timing\n", pes->pid));
}
+
pck.flags = GF_M2TS_PES_PCK_AU_START;
force_new_au = 0;
au_start_in_pes = 1;
- ts->on_event(ts, GF_M2TS_EVT_PES_PCK, &pck);
+ if (pes_hdr->data_alignment && !first_nal_offset_in_pck && !pes->single_nal_mode) {
+ full_au_pes_mode = GF_TRUE;
+ au_start = (u8 *) pck.data;
+ } else {
+ ts->on_event(ts, GF_M2TS_EVT_PES_PCK, &pck);
+ }
prev_is_au_delim=1;
}
- } else if ((nal_type==GF_HEVC_NALU_SLICE_IDR_W_DLP)
- || (nal_type==GF_HEVC_NALU_SLICE_IDR_N_LP)
- ) {
- pck.flags = GF_M2TS_PES_PCK_RAP;
- ts->on_event(ts, GF_M2TS_EVT_PES_PCK, &pck);
+ } else if ((nal_type>=GF_HEVC_NALU_SLICE_BLA_W_LP) && (nal_type<=GF_HEVC_NALU_SLICE_CRA)) {
+ if (!full_au_pes_mode) {
+ pck.flags = GF_M2TS_PES_PCK_RAP;
+ ts->on_event(ts, GF_M2TS_EVT_PES_PCK, &pck);
+ } else {
+ pck.flags |= GF_M2TS_PES_PCK_RAP;
+ }
prev_is_au_delim=0;
}
else
#endif //GPAC_DISABLE_HEVC
{
- pck.flags = 0;
- ts->on_event(ts, GF_M2TS_EVT_PES_PCK, &pck);
+ if (!full_au_pes_mode) {
+ pck.flags = 0;
+ ts->on_event(ts, GF_M2TS_EVT_PES_PCK, &pck);
+ }
prev_is_au_delim=0;
}
} else {
/*check AU start type*/
if ((nal_type==GF_AVC_NALU_ACCESS_UNIT) || (nal_type==GF_AVC_NALU_VDRD)) {
if (!prev_is_au_delim) {
+
+ //this was not a one AU per PES config, dispatch
+ if (au_start) {
+ pck.data = (char *)au_start;
+ pck.data_len = (u32) (data - au_start);
+ ts->on_event(ts, GF_M2TS_EVT_PES_PCK, &pck);
+ au_start = NULL;
+ full_au_pes_mode = 0;
+ }
+
if (au_start_in_pes) {
/*FIXME - we should check the AVC framerate to update the timing ...*/
pck.DTS += 3000;
pck.flags = GF_M2TS_PES_PCK_AU_START;
force_new_au = 0;
au_start_in_pes = 1;
- ts->on_event(ts, GF_M2TS_EVT_PES_PCK, &pck);
+ if (pes_hdr->data_alignment && !first_nal_offset_in_pck && !pes->single_nal_mode) {
+ full_au_pes_mode = GF_TRUE;
+ au_start = (u8 *) pck.data;
+ } else {
+ ts->on_event(ts, GF_M2TS_EVT_PES_PCK, &pck);
+ }
prev_is_au_delim=1;
}
} else {
- pck.flags = (nal_type==GF_AVC_NALU_IDR_SLICE) ? GF_M2TS_PES_PCK_RAP : 0;
- ts->on_event(ts, GF_M2TS_EVT_PES_PCK, &pck);
+ if (!full_au_pes_mode) {
+ pck.flags = (nal_type==GF_AVC_NALU_IDR_SLICE) ? GF_M2TS_PES_PCK_RAP : 0;
+ ts->on_event(ts, GF_M2TS_EVT_PES_PCK, &pck);
+ } else {
+ if (nal_type==GF_AVC_NALU_IDR_SLICE) pck.flags |= GF_M2TS_PES_PCK_RAP;
+ }
prev_is_au_delim=0;
}
}
data += sc_pos;
data_len -= sc_pos;
+ first_nal_offset_in_pck += sc_pos;
sc_pos = 0;
if (esc_code_found) {
/*we did not consume all data*/
if (!start_code_found) {
u32 min_size = is_hevc ? 6 : 5;
+
+ if (au_start) {
+ pck.data = (char *)au_start;
+ pck.data_len = (u32) (data - au_start);
+ ts->on_event(ts, GF_M2TS_EVT_PES_PCK, &pck);
+ au_start = NULL;
+ }
+
/*if not enough data to locate start code, store it*/
if (data_len < min_size )
return data_len;
/*otherwise this is the middle of a frame, let's dispatch it*/
}
+ if (au_start) {
+ if (is_hevc) {
+#ifndef GPAC_DISABLE_HEVC
+ nal_type = (data[4] & 0x7E) >> 1;
+ if ((nal_type>=GF_HEVC_NALU_SLICE_BLA_W_LP) && (nal_type<=GF_HEVC_NALU_SLICE_CRA)) {
+ pck.flags |= GF_M2TS_PES_PCK_RAP;
+ }
+#endif
+ } else {
+ nal_type = data[4] & 0x1F;
+ if (nal_type==GF_AVC_NALU_IDR_SLICE) pck.flags |= GF_M2TS_PES_PCK_RAP;
+ }
+
+ pck.data = (char *)au_start;
+ pck.data_len = (u32) (data - au_start) + data_len;
+ ts->on_event(ts, GF_M2TS_EVT_PES_PCK, &pck);
+ return 0;
+ }
+
if (data_len) {
pck.flags = 0;
pck.data = (char *)data;
return 0;
}
-static u32 gf_m2ts_reframe_avc_h264(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len)
+static u32 gf_m2ts_reframe_avc_h264(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *pes_hdr)
{
- return gf_m2ts_reframe_nalu_video(ts, pes, same_pts, data, data_len, 0);
+ return gf_m2ts_reframe_nalu_video(ts, pes, same_pts, data, data_len, pes_hdr, 0);
}
-static u32 gf_m2ts_reframe_hevc(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len)
+static u32 gf_m2ts_reframe_hevc(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *pes_hdr)
{
- return gf_m2ts_reframe_nalu_video(ts, pes, same_pts, data, data_len, 1);
+ return gf_m2ts_reframe_nalu_video(ts, pes, same_pts, data, data_len, pes_hdr, 1);
}
-static u32 gf_m2ts_reframe_mpeg_video(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len)
+static u32 gf_m2ts_reframe_mpeg_video(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *pes_hdr)
{
u32 sc_pos = 0;
u32 to_send = data_len;
u32 profile, sr_idx, nb_ch, frame_size;
} ADTSHeader;
-static u32 gf_m2ts_reframe_aac_adts(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len)
+static u32 gf_m2ts_reframe_aac_adts(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *pes_hdr)
{
ADTSHeader hdr;
u32 sc_pos = 0;
return data_len - sc_pos;
}
-static u32 gf_m2ts_reframe_aac_latm(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len)
+static u32 gf_m2ts_reframe_aac_latm(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *pes_hdr)
{
u32 sc_pos = 0;
u32 start = 0;
#ifndef GPAC_DISABLE_AV_PARSERS
-static u32 gf_m2ts_reframe_mpeg_audio(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len)
+static u32 gf_m2ts_reframe_mpeg_audio(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *pes_hdr)
{
GF_M2TS_PES_PCK pck;
u32 pos, frame_size, remain;
/*we consumed all data*/
return 0;
}
+
#endif /*GPAC_DISABLE_AV_PARSERS*/
+/* ID3v2.3 four-character frame identifiers (see ID3v2.3 spec, section 4).
+ Fixed: TALB was mistyped as 'TENC' (duplicating ID3V2_FRAME_TENC) and TPOS as 'TPE5'. */
+typedef enum {
+ ID3V2_FRAME_AENC = GF_4CC('A','E','N','C'),
+ ID3V2_FRAME_APIC = GF_4CC('A','P','I','C'),
+ ID3V2_FRAME_COMM = GF_4CC('C','O','M','M'),
+ ID3V2_FRAME_COMR = GF_4CC('C','O','M','R'),
+ ID3V2_FRAME_ENCR = GF_4CC('E','N','C','R'),
+ ID3V2_FRAME_EQUA = GF_4CC('E','Q','U','A'),
+ ID3V2_FRAME_ETCO = GF_4CC('E','T','C','O'),
+ ID3V2_FRAME_GEOB = GF_4CC('G','E','O','B'),
+ ID3V2_FRAME_GRID = GF_4CC('G','R','I','D'),
+ ID3V2_FRAME_IPLS = GF_4CC('I','P','L','S'),
+ ID3V2_FRAME_LINK = GF_4CC('L','I','N','K'),
+ ID3V2_FRAME_MCDI = GF_4CC('M','C','D','I'),
+ ID3V2_FRAME_MLLT = GF_4CC('M','L','L','T'),
+ ID3V2_FRAME_OWNE = GF_4CC('O','W','N','E'),
+ ID3V2_FRAME_PRIV = GF_4CC('P','R','I','V'),
+ ID3V2_FRAME_PCNT = GF_4CC('P','C','N','T'),
+ ID3V2_FRAME_POPM = GF_4CC('P','O','P','M'),
+ ID3V2_FRAME_POSS = GF_4CC('P','O','S','S'),
+ ID3V2_FRAME_RBUF = GF_4CC('R','B','U','F'),
+ ID3V2_FRAME_RVAD = GF_4CC('R','V','A','D'),
+ ID3V2_FRAME_RVRB = GF_4CC('R','V','R','B'),
+ ID3V2_FRAME_SYLT = GF_4CC('S','Y','L','T'),
+ ID3V2_FRAME_SYTC = GF_4CC('S','Y','T','C'),
+ ID3V2_FRAME_TALB = GF_4CC('T','A','L','B'),
+ ID3V2_FRAME_TBPM = GF_4CC('T','B','P','M'),
+ ID3V2_FRAME_TCOM = GF_4CC('T','C','O','M'),
+ ID3V2_FRAME_TCON = GF_4CC('T','C','O','N'),
+ ID3V2_FRAME_TCOP = GF_4CC('T','C','O','P'),
+ ID3V2_FRAME_TDAT = GF_4CC('T','D','A','T'),
+ ID3V2_FRAME_TDLY = GF_4CC('T','D','L','Y'),
+ ID3V2_FRAME_TENC = GF_4CC('T','E','N','C'),
+ ID3V2_FRAME_TEXT = GF_4CC('T','E','X','T'),
+ ID3V2_FRAME_TFLT = GF_4CC('T','F','L','T'),
+ ID3V2_FRAME_TIME = GF_4CC('T','I','M','E'),
+ ID3V2_FRAME_TIT1 = GF_4CC('T','I','T','1'),
+ ID3V2_FRAME_TIT2 = GF_4CC('T','I','T','2'),
+ ID3V2_FRAME_TIT3 = GF_4CC('T','I','T','3'),
+ ID3V2_FRAME_TKEY = GF_4CC('T','K','E','Y'),
+ ID3V2_FRAME_TLAN = GF_4CC('T','L','A','N'),
+ ID3V2_FRAME_TLEN = GF_4CC('T','L','E','N'),
+ ID3V2_FRAME_TMED = GF_4CC('T','M','E','D'),
+ ID3V2_FRAME_TOAL = GF_4CC('T','O','A','L'),
+ ID3V2_FRAME_TOFN = GF_4CC('T','O','F','N'),
+ ID3V2_FRAME_TOLY = GF_4CC('T','O','L','Y'),
+ ID3V2_FRAME_TOPE = GF_4CC('T','O','P','E'),
+ ID3V2_FRAME_TORY = GF_4CC('T','O','R','Y'),
+ ID3V2_FRAME_TOWN = GF_4CC('T','O','W','N'),
+ ID3V2_FRAME_TPE1 = GF_4CC('T','P','E','1'),
+ ID3V2_FRAME_TPE2 = GF_4CC('T','P','E','2'),
+ ID3V2_FRAME_TPE3 = GF_4CC('T','P','E','3'),
+ ID3V2_FRAME_TPE4 = GF_4CC('T','P','E','4'),
+ ID3V2_FRAME_TPOS = GF_4CC('T','P','O','S'),
+ ID3V2_FRAME_TPUB = GF_4CC('T','P','U','B'),
+ ID3V2_FRAME_TRCK = GF_4CC('T','R','C','K'),
+ ID3V2_FRAME_TRDA = GF_4CC('T','R','D','A'),
+ ID3V2_FRAME_TRSN = GF_4CC('T','R','S','N'),
+ ID3V2_FRAME_TRSO = GF_4CC('T','R','S','O'),
+ ID3V2_FRAME_TSIZ = GF_4CC('T','S','I','Z'),
+ ID3V2_FRAME_TSRC = GF_4CC('T','S','R','C'),
+ ID3V2_FRAME_TSSE = GF_4CC('T','S','S','E'),
+ ID3V2_FRAME_TYER = GF_4CC('T','Y','E','R'),
+ ID3V2_FRAME_TXXX = GF_4CC('T','X','X','X'),
+ ID3V2_FRAME_UFID = GF_4CC('U','F','I','D'),
+ ID3V2_FRAME_USER = GF_4CC('U','S','E','R'),
+ ID3V2_FRAME_USLT = GF_4CC('U','S','L','T'),
+ ID3V2_FRAME_WCOM = GF_4CC('W','C','O','M'),
+ ID3V2_FRAME_WCOP = GF_4CC('W','C','O','P'),
+ ID3V2_FRAME_WOAF = GF_4CC('W','O','A','F'),
+ ID3V2_FRAME_WOAR = GF_4CC('W','O','A','R'),
+ ID3V2_FRAME_WOAS = GF_4CC('W','O','A','S'),
+ ID3V2_FRAME_WORS = GF_4CC('W','O','R','S'),
+ ID3V2_FRAME_WPAY = GF_4CC('W','P','A','Y'),
+ ID3V2_FRAME_WPUB = GF_4CC('W','P','U','B'),
+ ID3V2_FRAME_WXXX = GF_4CC('W','X','X','X')
+} GF_ID3v2FrameType;
+
+/* Appends msg_len bytes of msg to *buffer (capacity *size, write cursor *pos),
+ growing the buffer when needed. Ownership of *buffer stays with the caller. */
+static void add_text(char **buffer, u32 *size, u32 *pos, char *msg, u32 msg_len)
+{
+ if (*pos+msg_len>*size) {
+ /* grow to at least the required size plus headroom; the previous formula
+ (*pos+msg_len-*size+256) under-allocated once *size exceeded 256 bytes */
+ *size = *pos+msg_len+256;
+ *buffer = (char *)gf_realloc(*buffer, *size);
+ }
+ /* length-bounded copy; memcpy (not strncpy) so embedded NUL bytes are carried through */
+ memcpy((*buffer)+(*pos), msg, msg_len);
+ *pos += msg_len;
+}
+
+/* Parses an ID3v2.3 tag from data[0..length) and appends the payload of any
+ TXXX (user text) frame to *output via add_text. Returns GF_NOT_SUPPORTED for
+ non-ID3 data or flag combinations we do not handle. */
+static GF_Err id3_parse_tag(char *data, u32 length, char **output, u32 *output_size, u32 *output_pos)
+{
+ u32 size;
+ u32 pos = 0;
+ /* ID3VVFFFFSIZE = 13bytes
+ * ID3 string
+ * VV = Version
+ * F = Flags
+ * SIZE = 32bits size with first Most Significant bit set to 0 -> 28 bits
+ * Size starts AFTER this header, meaning we have to add 10 bytes
+ */
+ if (length < 10) return GF_NOT_SUPPORTED;
+ if (data[pos] == 'I' && data[pos+1] == 'D' && data[pos+2] == '3') {
+ //u16 version = (data[pos+3]<<8)+data[pos+4];
+ //Bool unsync_flag = ((data[pos+5]>>7 & 0x1) ? GF_TRUE: GF_FALSE);
+ Bool extended_header_flag = ((data[pos+5]>>6 & 0x1) ? GF_TRUE: GF_FALSE);
+ //Bool experimental_flag = ((data[pos+5]>>5 & 0x1) ? GF_TRUE: GF_FALSE);
+ if (data[pos+5] & 0x1F) {
+ return GF_NOT_SUPPORTED;
+ } else {
+ size = 10 + ((data[pos+9] & 0x7f) + ((data[pos+8] & 0x7f) << 7) + ((data[pos+7] & 0x7f) << 14) + ((data[pos+6] & 0x7f) << 21));
+ }
+ /* never trust the embedded tag size beyond the buffer we were given */
+ if (size > length) size = length;
+ pos += 10;
+
+ if (extended_header_flag) {
+ /* ID3v2.3 extended header size is stored big-endian (was previously read little-endian) */
+ u32 extended_size = 4 + (((u8)data[pos+3]) + (((u8)data[pos+2]) << 8) + (((u8)data[pos+1]) << 16) + (((u8)data[pos]) << 24));
+ pos += extended_size;
+ }
+
+ /* each frame needs at least its 10-byte header */
+ while (pos + 10 <= size) {
+ GF_ID3v2FrameType type;
+ u32 frame_size, frame_pos;
+ Bool compression_flag, encryption_flag/*, tag_alter_preservation_flag, file_alter_preservation_flag, readonly_flag, grouping_flag*/;
+ frame_pos = pos;
+ /* frame header: 4CC type, 32-bit big-endian payload size, two flag bytes */
+ type = (GF_ID3v2FrameType)(((data[pos+3]) + ((data[pos+2]) << 8) + ((data[pos+1]) << 16) + ((data[pos]) << 24)));
+ pos+=4;
+ frame_size = 10 + (((u8)data[pos+3]) + (((u8)data[pos+2]) << 8) + (((u8)data[pos+1]) << 16) + (((u8)data[pos]) << 24));
+ pos+=4;
+ //tag_alter_preservation_flag = ((data[pos]>>7 & 0x1) ? GF_TRUE: GF_FALSE);
+ //file_alter_preservation_flag = ((data[pos]>>6 & 0x1) ? GF_TRUE: GF_FALSE);
+ //readonly_flag = ((data[pos]>>5 & 0x1) ? GF_TRUE: GF_FALSE);
+ pos++;
+ compression_flag = ((data[pos]>>7 & 0x1) ? GF_TRUE: GF_FALSE);
+ encryption_flag = ((data[pos]>>6 & 0x1) ? GF_TRUE: GF_FALSE);
+ //grouping_flag = ((data[pos]>>5 & 0x1) ? GF_TRUE: GF_FALSE);
+ /* abort on frames whose declared size overflows the tag */
+ if (frame_pos + frame_size > size) break;
+ if (compression_flag || encryption_flag) {
+ /* unsupported, skip */
+ pos = frame_pos + frame_size;
+ } else {
+ switch (type) {
+ case ID3V2_FRAME_TXXX:
+ /* skip flag byte + encoding bytes; guard against u32 underflow on tiny frames */
+ if (frame_size > 13) add_text(output, output_size, output_pos, data+pos+3, frame_size-10-3);
+ pos = frame_pos + frame_size;
+ break;
+ default:
+ /* unsupported, skip */
+ pos = frame_pos + frame_size;
+ }
+ }
+ }
+
+ return GF_OK;
+ } else {
+ return GF_NOT_SUPPORTED;
+ }
+}
+
+/* Reframer for HLS ID3 timed-metadata PES streams: decodes the ID3 text
+ payload and wraps it in a WebVTT-like cue ("<PTS> --> NEXT\n...\n\n"),
+ then dispatches it as a single GF_M2TS_EVT_PES_PCK event.
+ pes_hdr is accepted for reframer-signature uniformity but is unused here.
+ Always returns 0: all input bytes are consumed. */
+static u32 gf_m2ts_reframe_id3_pes(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *pes_hdr)
+{
+ char frame_header[256];
+ char *output_text = NULL;
+ u32 output_len = 0;
+ u32 pos = 0;
+ GF_M2TS_PES_PCK pck;
+ pck.flags = 0;
+ if (pes->rap) pck.flags |= GF_M2TS_PES_PCK_RAP;
+ if (!same_pts) pck.flags |= GF_M2TS_PES_PCK_AU_START;
+ pck.DTS = pes->DTS;
+ pck.PTS = pes->PTS;
+ /* cue start is the PES PTS; the end time is unknown until the next cue, hence "NEXT" */
+ sprintf(frame_header, LLU" --> NEXT\n", pes->PTS);
+ add_text(&output_text, &output_len, &pos, frame_header, (u32)strlen(frame_header));
+ /* parse errors are deliberately ignored: an empty cue is still emitted */
+ id3_parse_tag((char *)data, data_len, &output_text, &output_len, &pos);
+ add_text(&output_text, &output_len, &pos, "\n\n", 2);
+ pck.data = (char *)output_text;
+ pck.data_len = pos;
+ pck.stream = pes;
+ ts->on_event(ts, GF_M2TS_EVT_PES_PCK, &pck);
+ /* the packet payload is copied/consumed by the event handler; free our buffer */
+ gf_free(output_text);
+ /*we consumed all data*/
+ return 0;
+}
static u32 gf_m2ts_sync(GF_M2TS_Demuxer *ts, Bool simple_check)
{
if ((pes->flags & GF_M2TS_INHERIT_PCR) && ts->ess[es->program->pcr_pid]==es)
ts->ess[es->program->pcr_pid] = NULL;
- if (pes->data) gf_free(pes->data);
+ if (pes->pck_data) gf_free(pes->pck_data);
if (pes->prev_data) gf_free(pes->prev_data);
if (pes->buf) gf_free(pes->buf);
+ if (pes->temi_tc_desc) gf_free(pes->temi_tc_desc);
}
if (es->slcfg) gf_free(es->slcfg);
gf_free(es);
static void gf_m2ts_section_complete(GF_M2TS_Demuxer *ts, GF_M2TS_SectionFilter *sec, GF_M2TS_SECTION_ES *ses)
{
+ //seek mode, only process PAT and PMT
+ if (ts->start_range && (sec->section[0] != GF_M2TS_TABLE_ID_PAT) && (sec->section[0] != GF_M2TS_TABLE_ID_PMT)) {
+ /*clean-up (including broken sections)*/
+ if (sec->section) gf_free(sec->section);
+ sec->section = NULL;
+ sec->length = sec->received = 0;
+ return;
+ }
+
if (!sec->process_section) {
if ((ts->on_event && (sec->section[0]==GF_M2TS_TABLE_ID_AIT)) ) {
#ifdef GPAC_ENABLE_DSMCC
return;
}
+/* Reads an MPEG-2 metadata_pointer_descriptor (ISO/IEC 13818-1) of `length`
+ bytes from bs. Returns a newly allocated descriptor the caller must free
+ with gf_m2ts_metadata_pointer_descriptor_del, or NULL on allocation failure. */
+static GF_M2TS_MetadataPointerDescriptor *gf_m2ts_read_metadata_pointer_descriptor(GF_BitStream *bs, u32 length)
+{
+ u32 size;
+ GF_M2TS_MetadataPointerDescriptor *d;
+ GF_SAFEALLOC(d, GF_M2TS_MetadataPointerDescriptor);
+ if (!d) return NULL;
+ d->application_format = gf_bs_read_u16(bs);
+ size = 2;
+ /* 0xFFFF: a 32-bit registered application format identifier follows */
+ if (d->application_format == 0xFFFF) {
+ d->application_format_identifier = gf_bs_read_u32(bs);
+ size += 4;
+ }
+ d->format = gf_bs_read_u8(bs);
+ size += 1;
+ /* 0xFF: a 32-bit registered format identifier follows */
+ if (d->format == 0xFF) {
+ d->format_identifier = gf_bs_read_u32(bs);
+ size += 4;
+ }
+ d->service_id = gf_bs_read_u8(bs);
+ d->locator_record_flag = (gf_bs_read_int(bs, 1) ? GF_TRUE : GF_FALSE);
+ d->carriage_flag = (enum metadata_carriage)gf_bs_read_int(bs, 2);
+ gf_bs_read_int(bs, 5); /*reserved */
+ size += 2;
+ if (d->locator_record_flag) {
+ d->locator_length = gf_bs_read_u8(bs);
+ d->locator_data = (char *)gf_malloc(d->locator_length);
+ size += 1 + d->locator_length;
+ gf_bs_read_data(bs, d->locator_data, d->locator_length);
+ }
+ /* carriage_flag==3 means "carried in a private data stream": no program number */
+ if (d->carriage_flag != 3) {
+ d->program_number = gf_bs_read_u16(bs);
+ size += 2;
+ }
+ if (d->carriage_flag == 1) {
+ d->ts_location = gf_bs_read_u16(bs);
+ d->ts_id = gf_bs_read_u16(bs);
+ size += 4;
+ }
+ /* remaining bytes are opaque private data */
+ if (length-size > 0) {
+ d->data_size = length-size;
+ d->data = (char *)gf_malloc(d->data_size);
+ gf_bs_read_data(bs, d->data, d->data_size);
+ }
+ return d;
+}
+
+/* Frees a metadata pointer descriptor and its owned buffers (NULL-safe). */
+void gf_m2ts_metadata_pointer_descriptor_del(GF_M2TS_MetadataPointerDescriptor *metapd)
+{
+ if (metapd) {
+ if (metapd->locator_data) gf_free(metapd->locator_data);
+ if (metapd->data) gf_free(metapd->data);
+ gf_free(metapd);
+ }
+}
+
+/* Reads an MPEG-2 metadata_descriptor (ISO/IEC 13818-1) of `length` bytes
+ from bs. Returns a newly allocated descriptor the caller must free with
+ gf_m2ts_metadata_descriptor_del, or NULL on allocation failure. */
+static GF_M2TS_MetadataDescriptor *gf_m2ts_read_metadata_descriptor(GF_BitStream *bs, u32 length)
+{
+ u32 size;
+ GF_M2TS_MetadataDescriptor *d;
+ GF_SAFEALLOC(d, GF_M2TS_MetadataDescriptor);
+ if (!d) return NULL;
+ d->application_format = gf_bs_read_u16(bs);
+ size = 2;
+ /* 0xFFFF: a 32-bit registered application format identifier follows */
+ if (d->application_format == 0xFFFF) {
+ d->application_format_identifier = gf_bs_read_u32(bs);
+ size += 4;
+ }
+ d->format = gf_bs_read_u8(bs);
+ size += 1;
+ /* 0xFF: a 32-bit registered format identifier follows */
+ if (d->format == 0xFF) {
+ d->format_identifier = gf_bs_read_u32(bs);
+ size += 4;
+ }
+ d->service_id = gf_bs_read_u8(bs);
+ d->decoder_config_flags = gf_bs_read_int(bs, 3);
+ d->dsmcc_flag = (gf_bs_read_int(bs, 1) ? GF_TRUE : GF_FALSE);
+ gf_bs_read_int(bs, 4); /* reserved */
+ size += 2;
+ if (d->dsmcc_flag) {
+ d->service_id_record_length = gf_bs_read_u8(bs);
+ d->service_id_record = (char *)gf_malloc(d->service_id_record_length);
+ size += 1 + d->service_id_record_length;
+ gf_bs_read_data(bs, d->service_id_record, d->service_id_record_length);
+ }
+ /* decoder_config_flags: 1 = inline config, 3 = config id, 4 = config in another service */
+ if (d->decoder_config_flags == 1) {
+ d->decoder_config_length = gf_bs_read_u8(bs);
+ d->decoder_config = (char *)gf_malloc(d->decoder_config_length);
+ size += 1 + d->decoder_config_length;
+ gf_bs_read_data(bs, d->decoder_config, d->decoder_config_length);
+ }
+ if (d->decoder_config_flags == 3) {
+ d->decoder_config_id_length = gf_bs_read_u8(bs);
+ d->decoder_config_id = (char *)gf_malloc(d->decoder_config_id_length);
+ size += 1 + d->decoder_config_id_length;
+ gf_bs_read_data(bs, d->decoder_config_id, d->decoder_config_id_length);
+ }
+ if (d->decoder_config_flags == 4) {
+ d->decoder_config_service_id = gf_bs_read_u8(bs);
+ size++;
+ }
+ return d;
+}
+
+/* Frees a metadata descriptor and its owned buffers (NULL-safe). */
+void gf_m2ts_metadata_descriptor_del(GF_M2TS_MetadataDescriptor *metad)
+{
+ if (metad) {
+ if (metad->service_id_record) gf_free(metad->service_id_record);
+ if (metad->decoder_config) gf_free(metad->decoder_config);
+ if (metad->decoder_config_id) gf_free(metad->decoder_config_id);
+ gf_free(metad);
+ }
+}
+
static void gf_m2ts_process_pmt(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *pmt, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status)
{
u32 info_length, pos, desc_len, evt_type, nb_es,i;
gf_odf_desc_del((GF_Descriptor *)pmt->program->pmt_iod);
pmt->program->pmt_iod = NULL;
}
+ } else if (tag == GF_M2TS_METADATA_POINTER_DESCRIPTOR) {
+ GF_BitStream *metadatapd_bs;
+ GF_M2TS_MetadataPointerDescriptor *metapd;
+ metadatapd_bs = gf_bs_new((char *)data+6, len, GF_BITSTREAM_READ);
+ metapd = gf_m2ts_read_metadata_pointer_descriptor(metadatapd_bs, len);
+ gf_bs_del(metadatapd_bs);
+ if (metapd->application_format_identifier == GF_4CC('I', 'D', '3', ' ') &&
+ metapd->format_identifier == GF_4CC('I', 'D', '3', ' ') &&
+ metapd->carriage_flag == METADATA_CARRIAGE_SAME_TS) {
+ /*HLS ID3 Metadata */
+ pmt->program->metadata_pointer_descriptor = metapd;
+ } else {
+ /* don't know what to do with it for now, delete */
+ gf_m2ts_metadata_pointer_descriptor_del(metapd);
+ }
} else {
#else
{
case GF_M2TS_AUDIO_AC3:
case GF_M2TS_AUDIO_DTS:
case GF_M2TS_SUBTITLE_DVB:
+ case GF_M2TS_METADATA_PES:
GF_SAFEALLOC(pes, GF_M2TS_PES);
pes->cc = -1;
pes->flags = GF_M2TS_ES_IS_PES;
reg_desc_format = GF_4CC(data[2], data[3], data[4], data[5]);
/*cf http://www.smpte-ra.org/mpegreg/mpegreg.html*/
switch (reg_desc_format) {
- case GF_4CC(0x41, 0x43, 0x2D, 0x33):
+ case GF_4CC('A', 'C', '-', '3'):
es->stream_type = GF_M2TS_AUDIO_AC3;
break;
- case GF_4CC(0x56, 0x43, 0x2D, 0x31):
+ case GF_4CC('V', 'C', '-', '1'):
es->stream_type = GF_M2TS_VIDEO_VC1;
break;
}
if (pes)
pes->depends_on_pid = (data[4] & 0x3F) + es->program->pmt_pid;
break;
+ case GF_M2TS_METADATA_DESCRIPTOR:
+ {
+ GF_BitStream *metadatad_bs;
+ GF_M2TS_MetadataDescriptor *metad;
+ metadatad_bs = gf_bs_new((char *)data+2, len, GF_BITSTREAM_READ);
+ metad = gf_m2ts_read_metadata_descriptor(metadatad_bs, len);
+ gf_bs_del(metadatad_bs);
+ if (metad->application_format_identifier == GF_4CC('I', 'D', '3', ' ') &&
+ metad->format_identifier == GF_4CC('I', 'D', '3', ' ')) {
+ /*HLS ID3 Metadata */
+ pes->metadata_descriptor = metad;
+ pes->stream_type = GF_M2TS_METADATA_ID3_HLS;
+ } else {
+ /* don't know what to do with it for now, delete */
+ gf_m2ts_metadata_descriptor_del(metad);
+ }
+ }
+ break;
+
default:
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] skipping descriptor (0x%x) not supported\n", tag));
break;
}
}
+/* Parses the pending TEMI timeline descriptor buffered on this PES
+ (pes->temi_tc_desc) and dispatches it as a GF_M2TS_EVT_TEMI_TIMECODE
+ event, stamped with the current PES PTS. Clears the pending-descriptor
+ length so the descriptor is reported only once. */
+static void gf_m2ts_flush_temi(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes)
+{
+ GF_M2TS_TemiTimecodeDescriptor temi_tc;
+ GF_BitStream *bs = gf_bs_new(pes->temi_tc_desc, pes->temi_tc_desc_len, GF_BITSTREAM_READ);
+ u32 has_timestamp = gf_bs_read_int(bs, 2);
+ /* NTP/PTP/timecode payloads are not consumed: nothing after the media timestamp is used here */
+ u32 has_ntp = gf_bs_read_int(bs, 1);
+ u32 has_ptp = gf_bs_read_int(bs, 1);
+ u32 has_timecode = gf_bs_read_int(bs, 2);
+
+ memset(&temi_tc, 0, sizeof(GF_M2TS_TemiTimecodeDescriptor));
+ temi_tc.force_reload = gf_bs_read_int(bs, 1);
+ temi_tc.is_paused = gf_bs_read_int(bs, 1);
+ temi_tc.is_discontinuity = gf_bs_read_int(bs, 1);
+ gf_bs_read_int(bs, 7);
+ temi_tc.timeline_id = gf_bs_read_int(bs, 8);
+ if (has_timestamp) {
+ temi_tc.media_timescale = gf_bs_read_u32(bs);
+ /* has_timestamp: 1 = 32-bit media timestamp, 2 = 64-bit media timestamp */
+ if (has_timestamp==2)
+ temi_tc.media_timestamp = gf_bs_read_u64(bs);
+ else
+ temi_tc.media_timestamp = gf_bs_read_u32(bs);
+ }
+ temi_tc.pes_pts = pes->PTS;
+ gf_bs_del(bs);
+ pes->temi_tc_desc_len = 0;
+ /* guard the callback, consistent with every other dispatch site in this demuxer */
+ if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_TEMI_TIMECODE, &temi_tc);
+}
+
static void gf_m2ts_flush_pes(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes)
{
GF_M2TS_PESHeader pesh;
/*we need at least a full, valid start code !!*/
- if ((pes->data_len >= 4) && !pes->data[0] && !pes->data[1] && (pes->data[2]==0x1)) {
+ if ((pes->pck_data_len >= 4) && !pes->pck_data[0] && !pes->pck_data[1] && (pes->pck_data[2]==0x1)) {
u32 len;
- u32 stream_id = pes->data[3] | 0x100;
+ u32 stream_id = pes->pck_data[3] | 0x100;
if ((stream_id >= 0x1c0 && stream_id <= 0x1df) ||
(stream_id >= 0x1e0 && stream_id <= 0x1ef) ||
(stream_id == 0x1bd) ||
+ (stream_id == 0x10d) ||
/*SL-packetized*/
- ((u8) pes->data[3]==0xfa)
+ ((u8) pes->pck_data[3]==0xfa)
) {
Bool same_pts = 0;
/*OK read header*/
- gf_m2ts_pes_header(pes, pes->data+3, pes->data_len-3, &pesh);
+ gf_m2ts_pes_header(pes, pes->pck_data+3, pes->pck_data_len-3, &pesh);
/*send PES timing*/
if (ts->notify_pes_timing) {
/*3-byte start-code + 6 bytes header + hdr extensions*/
len = 9 + pesh.hdr_data_len;
- if ((u8) pes->data[3]==0xfa) {
+ if ((u8) pes->pck_data[3]==0xfa) {
GF_M2TS_SL_PCK sl_pck;
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] SL Packet in PES for %d - ES ID %d\n", pes->pid, pes->mpeg4_es_id));
- if (pes->data_len > len) {
- sl_pck.data = (char *)pes->data + len;
- sl_pck.data_len = pes->data_len - len;
+ if (pes->pck_data_len > len) {
+ sl_pck.data = (char *)pes->pck_data + len;
+ sl_pck.data_len = pes->pck_data_len - len;
sl_pck.stream = (GF_M2TS_ES *)pes;
if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_SL_PCK, &sl_pck);
} else {
- GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Bad SL Packet size: (%d indicated < %d header)\n", pes->pid, pes->data_len, len));
+ GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Bad SL Packet size: (%d indicated < %d header)\n", pes->pid, pes->pck_data_len, len));
}
} else if (pes->reframe) {
- u32 remain;
+ u32 remain = 0;
u32 offset = len;
- if (pesh.pck_len && (pesh.pck_len-3-pesh.hdr_data_len != pes->data_len-len)) {
- GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PES payload size %d but received %d bytes\n", pes->pid, (u32) ( pesh.pck_len-3-pesh.hdr_data_len), pes->data_len-len));
+ if (pesh.pck_len && (pesh.pck_len-3-pesh.hdr_data_len != pes->pck_data_len-len)) {
+ GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PES payload size %d but received %d bytes\n", pes->pid, (u32) ( pesh.pck_len-3-pesh.hdr_data_len), pes->pck_data_len-len));
}
-
+ //copy over the remaining of previous PES payload before start of this PES payload
if (pes->prev_data_len) {
assert(pes->prev_data_len < len);
offset = len - pes->prev_data_len;
- memcpy(pes->data + offset, pes->prev_data, pes->prev_data_len);
+ memcpy(pes->pck_data + offset, pes->prev_data, pes->prev_data_len);
}
- remain = pes->reframe(ts, pes, same_pts, pes->data+offset, pes->data_len-offset);
+ if (pes->temi_tc_desc_len)
+ gf_m2ts_flush_temi(ts, pes);
+
+ if (! ts->start_range)
+ remain = pes->reframe(ts, pes, same_pts, pes->pck_data+offset, pes->pck_data_len-offset, &pesh);
+
+ //CLEANUP alloc stuff
if (pes->prev_data) gf_free(pes->prev_data);
pes->prev_data = NULL;
pes->prev_data_len = 0;
if (remain) {
pes->prev_data = gf_malloc(sizeof(char)*remain);
- memcpy(pes->prev_data, pes->data + pes->data_len - remain, remain);
+ memcpy(pes->prev_data, pes->pck_data + pes->pck_data_len - remain, remain);
pes->prev_data_len = remain;
}
}
} else {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PES %d: unknown stream ID %08X\n", pes->pid, stream_id));
}
- } else if (pes->data) {
+ } else if (pes->pck_data_len) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PES %d: Bad PES Header, discarding packet (maybe stream is encrypted ?)\n", pes->pid));
}
- if (pes->data) gf_free(pes->data);
- pes->data = NULL;
- pes->data_len = 0;
+ pes->pck_data_len = 0;
pes->pes_len = 0;
pes->rap = 0;
}
disc = 0;
if (disc) {
if (hdr->payload_start) {
- if (pes->data) {
+ if (pes->pck_data_len) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PES %d: Packet discontinuity (%d expected - got %d) - may have lost end of previous PES\n", pes->pid, expect_cc, hdr->continuity_counter));
}
} else {
- if (pes->data) {
+ if (pes->pck_data_len) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PES %d: Packet discontinuity (%d expected - got %d) - trashing PES packet\n", pes->pid, expect_cc, hdr->continuity_counter));
- gf_free(pes->data);
- pes->data = NULL;
}
- pes->data_len = 0;
+ pes->pck_data_len = 0;
pes->pes_len = 0;
pes->cc = -1;
return;
pes->before_last_pcr_value_pck_number = pes->program->before_last_pcr_value_pck_number;
pes->last_pcr_value = pes->program->last_pcr_value;
pes->last_pcr_value_pck_number = pes->program->last_pcr_value_pck_number;
- } else if (pes->pes_len && (pes->data_len + data_size == pes->pes_len + 6)) {
+ } else if (pes->pes_len && (pes->pck_data_len + data_size == pes->pes_len + 6)) {
/* 6 = startcode+stream_id+length*/
/*reassemble pes*/
- if (pes->data) pes->data = (u8*)gf_realloc(pes->data, pes->data_len+data_size);
- else pes->data = (u8*)gf_malloc(data_size);
- memcpy(pes->data+pes->data_len, data, data_size);
- pes->data_len += data_size;
+ if (pes->pck_data_len + data_size > pes->pck_alloc_len) {
+ pes->pck_alloc_len = pes->pck_data_len + data_size;
+ pes->pck_data = (u8*)gf_realloc(pes->pck_data, pes->pck_alloc_len);
+ }
+ memcpy(pes->pck_data+pes->pck_data_len, data, data_size);
+ pes->pck_data_len += data_size;
/*force discard*/
data_size = 0;
flush_pes = 1;
}
/*PES first fragment: flush previous packet*/
- if (flush_pes && pes->data) {
+ if (flush_pes && pes->pck_data_len) {
gf_m2ts_flush_pes(ts, pes);
if (!data_size) return;
}
/*we need to wait for first packet of PES*/
- if (!pes->data_len && !hdr->payload_start) {
+ if (!pes->pck_data_len && !hdr->payload_start) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Waiting for PES header, trashing data\n", hdr->pid));
return;
}
/*reassemble*/
- if (pes->data){
- pes->data = (u8*)gf_realloc(pes->data, pes->data_len+data_size);
- //fprintf(stderr, "[MPEG-2 TS] REALLOC \n");
- }else{
- pes->data = (u8*)gf_malloc(data_size);
+ if (pes->pck_data_len + data_size > pes->pck_alloc_len ){
+ pes->pck_alloc_len = pes->pck_data_len + data_size;
+ pes->pck_data = (u8*)gf_realloc(pes->pck_data, pes->pck_alloc_len);
}
- memcpy(pes->data+pes->data_len, data, data_size);
- pes->data_len += data_size;
+ memcpy(pes->pck_data + pes->pck_data_len, data, data_size);
+ pes->pck_data_len += data_size;
if (paf && paf->random_access_indicator) pes->rap = 1;
- if (hdr->payload_start && !pes->pes_len && (pes->data_len>=6)) {
- pes->pes_len = (pes->data[4]<<8) | pes->data[5];
+ if (hdr->payload_start && !pes->pes_len && (pes->pck_data_len>=6)) {
+ pes->pes_len = (pes->pck_data[4]<<8) | pes->pck_data[5];
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Got PES packet len %d\n", pes->pid, pes->pes_len));
- if (pes->pes_len + 6 == pes->data_len) {
+ if (pes->pes_len + 6 == pes->pck_data_len) {
gf_m2ts_flush_pes(ts, pes);
}
}
static void gf_m2ts_get_adaptation_field(GF_M2TS_Demuxer *ts, GF_M2TS_AdaptationField *paf, unsigned char *data, u32 size, u32 pid)
{
- char *af_extension;
+ unsigned char *af_extension;
paf->discontinuity_indicator = (data[0] & 0x80) ? 1 : 0;
paf->random_access_indicator = (data[0] & 0x40) ? 1 : 0;
paf->priority_indicator = (data[0] & 0x20) ? 1 : 0;
if (! af_desc_not_present) {
while (afext_bytes) {
- char URL[255];
GF_BitStream *bs;
char *desc;
u8 desc_tag = af_extension[0];
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Descriptor found (tag %d) size is %d but only %d bytes available\n", pid, desc_tag, desc_len, afext_bytes));
break;
}
- desc = af_extension+2;
+ desc = (char *) af_extension+2;
bs = gf_bs_new(desc, desc_len, GF_BITSTREAM_READ);
switch (desc_tag) {
case GF_M2TS_AFDESC_LOCATION_DESCRIPTOR:
{
- //u32 timeline_id;
- Bool external_url, use_base_temi_url;
- /*Bool force_reload = */gf_bs_read_int(bs, 1);
- /*Bool is_announcement = */gf_bs_read_int(bs, 1);
- /*Bool splicing_flag = */gf_bs_read_int(bs, 1);
+ Bool external_url , use_base_temi_url;
+ char URL[255];
+ GF_M2TS_TemiLocationDescriptor temi_loc;
+ memset(&temi_loc, 0, sizeof(GF_M2TS_TemiLocationDescriptor) );
+ temi_loc.reload_external = gf_bs_read_int(bs, 1);
+ temi_loc.is_announce = gf_bs_read_int(bs, 1);
+ temi_loc.is_splicing = gf_bs_read_int(bs, 1);
external_url = gf_bs_read_int(bs, 1);
use_base_temi_url = gf_bs_read_int(bs, 1);
gf_bs_read_int(bs, 3); //reserved
- /*timeline_id = */gf_bs_read_int(bs, 8);
+ temi_loc.timeline_id = gf_bs_read_int(bs, 8);
if (!external_url) {
if (!use_base_temi_url) {
char *_url = URL;
gf_bs_read_data(bs, _url, url_len);
_url[url_len] = 0;
}
+ temi_loc.external_URL = URL;
}
+ GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d AF Location descriptor found - URL %s\n", pid, URL));
+ if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_TEMI_LOCATION, &temi_loc);
}
- GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d AF Location descriptor found - URL %s\n", pid, URL));
break;
case GF_M2TS_AFDESC_TIMELINE_DESCRIPTOR:
-
- GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d AF Timeline descriptor found\n", pid));
+ if (ts->ess[pid] && (ts->ess[pid]->flags & GF_M2TS_ES_IS_PES)) {
+ GF_M2TS_PES *pes = (GF_M2TS_PES *) ts->ess[pid];
+ if (pes->temi_tc_desc_alloc_size < desc_len) {
+ pes->temi_tc_desc = gf_realloc(pes->temi_tc_desc, desc_len);
+ pes->temi_tc_desc_alloc_size = desc_len;
+ }
+ memcpy(pes->temi_tc_desc, desc, desc_len);
+ pes->temi_tc_desc_len = desc_len;
+
+ GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d AF Timeline descriptor found\n", pid));
+ }
break;
}
gf_bs_del(bs);
if (!pes || (pes->pid==pes->program->pmt_pid)) continue;
pes->cc = -1;
pes->frame_state = 0;
- if (pes->data) gf_free(pes->data);
- pes->data = NULL;
- pes->data_len = 0;
+ pes->pck_data_len = 0;
if (pes->prev_data) gf_free(pes->prev_data);
pes->prev_data = NULL;
pes->prev_data_len = 0;
pes->pes_len = pes->pes_end_packet_number = pes->pes_start_packet_number = 0;
if (pes->buf) gf_free(pes->buf);
pes->buf = NULL;
- pes->buf_len = 0;
+ if (pes->temi_tc_desc) gf_free(pes->temi_tc_desc);
+ pes->temi_tc_desc = NULL;
+ pes->temi_tc_desc_len = pes->temi_tc_desc_alloc_size = 0;
+
pes->before_last_pcr_value = pes->before_last_pcr_value_pck_number = 0;
pes->last_pcr_value = pes->last_pcr_value_pck_number = 0;
if (pes->program->pcr_pid==pes->pid) {
case GF_M2TS_PES_FRAMING_SKIP_NO_RESET:
pes->reframe = NULL;
break;
+ case GF_M2TS_PES_FRAMING_DEFAULT_NAL:
case GF_M2TS_PES_FRAMING_DEFAULT:
default:
switch (pes->stream_type) {
case GF_M2TS_VIDEO_H264:
case GF_M2TS_VIDEO_SVC:
pes->reframe = gf_m2ts_reframe_avc_h264;
+ pes->single_nal_mode = (mode==GF_M2TS_PES_FRAMING_DEFAULT_NAL) ? 1 : 0;
break;
case GF_M2TS_VIDEO_HEVC:
case GF_M2TS_VIDEO_SHVC:
pes->reframe = gf_m2ts_reframe_hevc;
+ pes->single_nal_mode = (mode==GF_M2TS_PES_FRAMING_DEFAULT_NAL) ? 1 : 0;
break;
case GF_M2TS_AUDIO_AAC:
pes->reframe = gf_m2ts_reframe_aac_adts;
case GF_M2TS_PRIVATE_DATA:
/* TODO: handle DVB subtitle streams */
+ break;
+ case GF_M2TS_METADATA_ID3_HLS:
+ pes->reframe = gf_m2ts_reframe_id3_pes;
+ break;
default:
pes->reframe = gf_m2ts_reframe_default;
break;
u32 pos = 0;
GF_BitStream *ts_bs = NULL;
- if (ts->start_range && ts->duration) {
- Double perc = ts->start_range / (1000 * ts->duration);
- pos = (u32) (s64) (perc * ts->file_size);
- /*align to TS packet size*/
- while (pos%188) pos++;
- if (pos>=ts->file_size) {
- ts->start_range = 0;
- pos = 0;
- }
- }
-
if (ts->file)
ts_bs = gf_bs_from_file(ts->file, GF_BITSTREAM_READ);
else
ts_bs = gf_bs_new(ts->ts_data_chunk, ts->ts_data_chunk_size, GF_BITSTREAM_READ);
while (ts->run_state && gf_bs_available(ts_bs) && !ts->force_file_refresh) {
+
+ if (ts->start_range && ts->duration) {
+ Double perc = ts->start_range / (1000 * ts->duration);
+ pos = (u32) (s64) (perc * ts->file_size);
+ /*align to TS packet size*/
+ pos/=188;
+ pos*=188;
+
+ if (pos>=ts->file_size) {
+ pos = 0;
+ }
+ ts->start_range = 0;
+ gf_bs_seek(ts_bs, pos);
+ }
+
/*m2ts chunks by chunks*/
size = gf_bs_read_data(ts_bs, data, 188);
if (!size && (ts->loop_demux == 1)) {
u32 size;
u8 *mem_address;
if (sscanf(fileName, "gmem://%d@%p", &size, &mem_address) != 2) {
- return GF_URL_ERROR;
+ return GF_FALSE;
}
while (size>188 && count) {
if (mem_address[0] != 0x47)
size-=188;
count--;
}
- return 1;
+ return GF_TRUE;
}
t = gf_f64_open(fileName, "rb");
else count--;
}
if (t) fclose(t);
- return count ? 0 : 1;
+ return count ? GF_FALSE : GF_TRUE;
}
static void rewrite_pts_dts(unsigned char *ptr, u64 TS)
if (sum == 0)
{
ErrorLocs[NErrors] = (255-r); NErrors++;
- if (DEBUG) fprintf(stderr, "Root found at r = %d, (255-r) = %d\n", r, (255-r));
+ if (RS_DEBUG) fprintf(stderr, "Root found at r = %d, (255-r) = %d\n", r, (255-r));
}
}
}
/* first check for illegal error locs */
for (r = 0; r < NErrors; r++) {
if (ErrorLocs[r] >= csize) {
- if (DEBUG) fprintf(stderr, "Error loc i=%d outside of codeword length %d\n", i, csize);
+ if (RS_DEBUG) fprintf(stderr, "Error loc i=%d outside of codeword length %d\n", i, csize);
return(0);
}
}
}
err = gmult(num, ginv(denom));
- if (DEBUG) fprintf(stderr, "Error magnitude %#x at loc %d\n", err, csize-i);
+ if (RS_DEBUG) fprintf(stderr, "Error magnitude %#x at loc %d\n", err, csize-i);
codeword[csize-i-1] ^= err;
}
return(1);
}
else {
- if (DEBUG && NErrors) fprintf(stderr, "Uncorrectable codeword\n");
+ if (RS_DEBUG && NErrors) fprintf(stderr, "Uncorrectable codeword\n");
return(0);
}
}
/* generator polynomial */
int genPoly[MAXDEG*2];
- int DEBUG = FALSE;
+ int RS_DEBUG = FALSE;
static void
compute_genpoly (int nbytes, int genpoly[]);
if (box->id) gf_isom_box_del((GF_Box *)box->id);
if (box->settings) gf_isom_box_del((GF_Box *)box->settings);
if (box->payload) gf_isom_box_del((GF_Box *)box->payload);
+ gf_free(s);
}
void vtte_del(GF_Box *s)
return cue;
}
+GF_EXPORT
void gf_webvtt_cue_del(GF_WebVTTCue * cue)
{
if (cue) {
return GF_OK;
}
+GF_EXPORT
GF_List *gf_webvtt_parse_iso_cues(GF_ISOSample *iso_sample, u64 start)
{
return gf_webvtt_parse_cues_from_data(iso_sample->data, iso_sample->dataLength, start);
return GF_OK;
}
-void gf_webvtt_dump_cue(void *user, GF_WebVTTCue *cue)
+static void gf_webvtt_dump_cue(void *user, GF_WebVTTCue *cue)
{
FILE *dump = (FILE *)user;
if (!cue || !dump) return;
fprintf(dump, "\n");
}
-GF_Err gf_webvtt_dump_cues(FILE *dump, GF_List *cues)
+static GF_Err gf_webvtt_dump_cues(FILE *dump, GF_List *cues)
{
u32 i;
for (i = 0; i < gf_list_count(cues); i++) {
parser->initialized = 0;
return GF_OK;
}
- strncpy(BOM, str, 5);
+ strncpy((char *) BOM, str, 5);
}
/*0: no unicode, 1: UTF-16BE, 2: UTF-16LE*/
u16 *uniLine;
str = (char*)((SFScript *)ptr)->script_text;
len = (u32)strlen(str);
- uniLine = (u16*)gf_malloc(sizeof(short) * len);
+ uniLine = (u16*)gf_malloc(sizeof(short) * (len+1));
_len = gf_utf8_mbstowcs(uniLine, len, (const char **) &str);
if (_len != (size_t) -1) {
len = (u32) _len;
memset(dst, 0, sizeof(char)*8);
gf_bs_read_data(read->bs, src, size);
dst_size -= 8;
- uncompress(dst+8, (uLongf *)&dst_size, src, size);
+ uncompress((Bytef *) dst+8, (uLongf *)&dst_size, (Bytef *) src, size);
dst_size += 8;
gf_free(src);
read->mem = dst;
osize = w*h;
dst = gf_malloc(sizeof(char)*osize);
- uncompress(dst, (uLongf *) &osize, buf, AlphaPlaneSize);
+ uncompress((Bytef *) dst, (uLongf *) &osize, buf, AlphaPlaneSize);
/*write alpha channel*/
for (j=0; j<osize; j++) {
raw[4*j + 3] = dst[j];
if (!node) return;
if (node->sgprivate->UserCallback) node->sgprivate->UserCallback(node, NULL, 1);
+
+ if (node->sgprivate->scenegraph && node->sgprivate->scenegraph->NodeCallback)
+ node->sgprivate->scenegraph->NodeCallback(node->sgprivate->scenegraph->userpriv, GF_SG_CALLBACK_NODE_DESTROY, node, NULL);
if (node->sgprivate->interact) {
if (node->sgprivate->interact->routes) {
}
GF_EXPORT
-Bool sg_fire_dom_event(GF_DOMEventTarget *et, GF_DOM_Event *event, GF_SceneGraph *sg, GF_Node *n)
+Bool gf_sg_fire_dom_event(GF_DOMEventTarget *et, GF_DOM_Event *event, GF_SceneGraph *sg, GF_Node *n)
{
if (et) {
if (et->ptr_type==GF_DOM_EVENT_TARGET_NODE ||
et->ptr_type == GF_DOM_EVENT_TARGET_DOCUMENT ||
et->ptr_type == GF_DOM_EVENT_TARGET_XHR ||
et->ptr_type == GF_DOM_EVENT_TARGET_MSE_MEDIASOURCE ||
- et->ptr_type == GF_DOM_EVENT_TARGET_HTML_MEDIA) {
+ et->ptr_type == GF_DOM_EVENT_TARGET_MSE_SOURCEBUFFER ||
+ et->ptr_type == GF_DOM_EVENT_TARGET_MSE_SOURCEBUFFERLIST ) {
GF_Node *observer = NULL;
u32 i, count, post_count;
if (et->ptr_type==GF_DOM_EVENT_TARGET_NODE) {
if (!parent) {
/*top of the graph, use Document*/
if (node->sgprivate->scenegraph->RootNode==node)
- sg_fire_dom_event(node->sgprivate->scenegraph->dom_evt, event, node->sgprivate->scenegraph, NULL);
+ gf_sg_fire_dom_event(node->sgprivate->scenegraph->dom_evt, event, node->sgprivate->scenegraph, NULL);
return;
}
if (cur_par_idx) {
else cur_par_idx = 0;
/*if no events attached,bubble by default*/
if (parent->sgprivate->interact) {
- Bool can_bubble = sg_fire_dom_event(parent->sgprivate->interact->dom_evt, event, node->sgprivate->scenegraph, parent);
+ Bool can_bubble = gf_sg_fire_dom_event(parent->sgprivate->interact->dom_evt, event, node->sgprivate->scenegraph, parent);
if (!can_bubble) {
return;
}
/*if no events attached,bubble by default*/
if (parent->sgprivate->interact) {
Bool can_bubble;
- can_bubble = sg_fire_dom_event(parent->sgprivate->interact->dom_evt, event, node->sgprivate->scenegraph, parent);
+ can_bubble = gf_sg_fire_dom_event(parent->sgprivate->interact->dom_evt, event, node->sgprivate->scenegraph, parent);
if(!can_bubble) return;
}
gf_sg_dom_event_bubble(parent, event, use_stack, cur_par_idx);
for (i=0; i<count; i++) {
GF_Node *n = (GF_Node *)gf_list_get(parents, i);
if (n->sgprivate->interact)
- sg_fire_dom_event(n->sgprivate->interact->dom_evt, event, node->sgprivate->scenegraph, n);
+ gf_sg_fire_dom_event(n->sgprivate->interact->dom_evt, event, node->sgprivate->scenegraph, n);
/*event has been canceled*/
if (event->event_phase & (GF_DOM_EVENT_PHASE_CANCEL|GF_DOM_EVENT_PHASE_CANCEL_ALL) ) {
sg->abort_bubbling = GF_FALSE;
if (node->sgprivate->interact) {
- can_bubble = sg_fire_dom_event(node->sgprivate->interact->dom_evt, event, node->sgprivate->scenegraph, node);
+ can_bubble = gf_sg_fire_dom_event(node->sgprivate->interact->dom_evt, event, node->sgprivate->scenegraph, node);
}
if ( (!node->sgprivate->interact || can_bubble) && event->bubbles) {
/*bubbling phase*/
gf_free(target);
}
+/* Returns the DOM event target of node n, lazily allocating the node's
+ interactivity extension and event target on first use. The returned target
+ is owned by the node. Returns NULL on allocation failure (the previous
+ version dereferenced the GF_SAFEALLOC result without checking it). */
+GF_DOMEventTarget *gf_dom_event_get_target_from_node(GF_Node *n)
+{
+ if (!n->sgprivate->interact) {
+ GF_SAFEALLOC(n->sgprivate->interact, struct _node_interactive_ext);
+ if (!n->sgprivate->interact) return NULL;
+ }
+ if (!n->sgprivate->interact->dom_evt) {
+ n->sgprivate->interact->dom_evt = gf_dom_event_target_new(GF_DOM_EVENT_TARGET_NODE, n);
+ }
+ return n->sgprivate->interact->dom_evt;
+}
+
#endif //GPAC_DISABLE_SVG
EVENT_JSPROPERTY_TRANSLATIONY = -37,
EVENT_JSPROPERTY_TYPE3D = -38,
EVENT_JSPROPERTY_ERROR = -39,
+ EVENT_JSPROPERTY_DYNAMIC_SCENE = -40,
} GF_DOMEventJSProperty;
typedef enum {
DOMNodeList *nl;
u32 count;
- u32 idx;
+ s32 idx;
if (!GF_JS_InstanceOf(c, obj, &dom_rt->domNodeListClass, NULL)) {
return JS_TRUE;
}
if (gf_dom_event_get_category(evtType) == GF_DOM_EVENT_MEDIA) {
void gf_html_media_get_event_target(JSContext *c, JSObject *obj, GF_DOMEventTarget **target, GF_SceneGraph **sg);
gf_html_media_get_event_target(c, obj, target, sg);
- } else if (gf_dom_event_get_category(evtType) == GF_DOM_EVENT_MEDIASOURCE) {
+ if (*target && *sg) return JS_TRUE;
+ }
+
+ if (gf_dom_event_get_category(evtType) == GF_DOM_EVENT_MEDIASOURCE) {
void gf_mse_get_event_target(JSContext *c, JSObject *obj, GF_DOMEventTarget **target, GF_SceneGraph **sg);
gf_mse_get_event_target(c, obj, target, sg);
- } else if (GF_JS_InstanceOf(c, obj, &dom_rt->domDocumentClass, NULL) || is_svg_document_class(c, obj)) {
+ if (*target && *sg) return JS_TRUE;
+ }
+
+ if (GF_JS_InstanceOf(c, obj, &dom_rt->domDocumentClass, NULL) || is_svg_document_class(c, obj)) {
/*document interface*/
*sg = dom_get_doc(c, obj);
if (*sg) {
} else {
return JS_TRUE;
}
- } else if (GF_JS_InstanceOf(c, obj, &dom_rt->domElementClass, NULL) || is_svg_element_class(c, obj)) {
+ } else if (GF_JS_InstanceOf(c, obj, &dom_rt->domElementClass, NULL) || is_svg_element_class(c, obj) || vrml_node) {
/*Element interface*/
if (vrml_node) {
*n = vrml_node;
hdl = (SVG_handlerElement *) ((XMLRI*)info.far_ptr)->target;
if (!hdl) continue;
if (! JSVAL_IS_NULL(funval) ) {
+#if (JS_VERSION>=185)
+ JSBool res = JS_FALSE;
+ if (! JS_StrictlyEqual(c, funval, *(jsval *)&hdl->js_fun_val, &res))
+ continue;
+#else
if (funval != *(jsval *)&hdl->js_fun_val) continue;
+#endif
} else if (hdl->children) {
txt = (GF_DOMText *) hdl->children->node;
if (txt->sgprivate->tag != TAG_DOMText) continue;
case GF_DOM_EVENT_TARGET_MSE_MEDIASOURCE:
*vp = OBJECT_TO_JSVAL(((GF_HTML_MediaSource *)evt->target)->_this);
break;
+ case GF_DOM_EVENT_TARGET_MSE_SOURCEBUFFER:
+ *vp = OBJECT_TO_JSVAL(((GF_HTML_SourceBuffer *)evt->target)->_this);
+ break;
+ case GF_DOM_EVENT_TARGET_MSE_SOURCEBUFFERLIST:
+ *vp = OBJECT_TO_JSVAL(((GF_HTML_SourceBufferList *)evt->target)->_this);
+ break;
default:
break;
}
case GF_DOM_EVENT_TARGET_MSE_MEDIASOURCE:
*vp = OBJECT_TO_JSVAL(((GF_HTML_MediaSource *)evt->target)->_this);
break;
+ case GF_DOM_EVENT_TARGET_MSE_SOURCEBUFFER:
+ *vp = OBJECT_TO_JSVAL(((GF_HTML_SourceBuffer *)evt->target)->_this);
+ break;
+ case GF_DOM_EVENT_TARGET_MSE_SOURCEBUFFERLIST:
+ *vp = OBJECT_TO_JSVAL(((GF_HTML_SourceBufferList *)evt->target)->_this);
+ break;
default:
break;
}
*vp = INT_TO_JSVAL(evt->detail); return JS_TRUE;
case EVENT_JSPROPERTY_ERROR:
*vp = INT_TO_JSVAL(evt->error_state); return JS_TRUE;
+ case EVENT_JSPROPERTY_DYNAMIC_SCENE:
+ *vp = INT_TO_JSVAL(evt->key_flags ? 1 : 0); return JS_TRUE;
default: return JS_TRUE;
}
if (!GF_JS_InstanceOf(c, obj, &dom_rt->xmlHTTPRequestClass, NULL) ) return;
ctx = (XMLHTTPContext *)SMJS_GET_PRIVATE(c, obj);
if (ctx) {
- if (ctx->onabort) gf_js_remove_root(c, &(ctx->onabort), GF_JSGC_VAL);
- if (ctx->onerror) gf_js_remove_root(c, &(ctx->onerror), GF_JSGC_VAL);
- if (ctx->onload) gf_js_remove_root(c, &(ctx->onload), GF_JSGC_VAL);
- if (ctx->onloadend) gf_js_remove_root(c, &(ctx->onloadend), GF_JSGC_VAL);
- if (ctx->onloadstart) gf_js_remove_root(c, &(ctx->onloadstart), GF_JSGC_VAL);
- if (ctx->onprogress) gf_js_remove_root(c, &(ctx->onprogress), GF_JSGC_VAL);
- if (ctx->onreadystatechange) gf_js_remove_root(c, &(ctx->onreadystatechange), GF_JSGC_VAL);
- if (ctx->ontimeout) gf_js_remove_root(c, &(ctx->ontimeout), GF_JSGC_VAL);
+ if (! JSVAL_IS_NULL(ctx->onabort)) gf_js_remove_root(c, &(ctx->onabort), GF_JSGC_VAL);
+ if (! JSVAL_IS_NULL(ctx->onerror)) gf_js_remove_root(c, &(ctx->onerror), GF_JSGC_VAL);
+ if (! JSVAL_IS_NULL(ctx->onload)) gf_js_remove_root(c, &(ctx->onload), GF_JSGC_VAL);
+ if (! JSVAL_IS_NULL(ctx->onloadend)) gf_js_remove_root(c, &(ctx->onloadend), GF_JSGC_VAL);
+ if (! JSVAL_IS_NULL(ctx->onloadstart)) gf_js_remove_root(c, &(ctx->onloadstart), GF_JSGC_VAL);
+ if (! JSVAL_IS_NULL(ctx->onprogress)) gf_js_remove_root(c, &(ctx->onprogress), GF_JSGC_VAL);
+ if (! JSVAL_IS_NULL(ctx->onreadystatechange)) gf_js_remove_root(c, &(ctx->onreadystatechange), GF_JSGC_VAL);
+ if (! JSVAL_IS_NULL(ctx->ontimeout)) gf_js_remove_root(c, &(ctx->ontimeout), GF_JSGC_VAL);
xml_http_reset(ctx);
gf_dom_event_target_del(ctx->event_target);
ctx->event_target = NULL;
xhr_evt.type = evtType;
xhr_evt.target = ctx->event_target->ptr;
xhr_evt.target_type = ctx->event_target->ptr_type;
- sg_fire_dom_event(ctx->event_target, &xhr_evt, ctx->owning_graph, NULL);
+ gf_sg_fire_dom_event(ctx->event_target, &xhr_evt, ctx->owning_graph, NULL);
}
static void xml_http_state_change(XMLHTTPContext *ctx)
jsval rval;
gf_sg_lock_javascript(ctx->c, GF_TRUE);
- if (ctx->onreadystatechange)
+ if (! JSVAL_IS_NULL(ctx->onreadystatechange))
JS_CallFunctionValue(ctx->c, ctx->_this, ctx->onreadystatechange, 0, NULL, &rval);
gf_sg_lock_javascript(ctx->c, GF_FALSE);
ctx->readyState = XHR_READYSTATE_OPENED;
xml_http_state_change(ctx);
xml_http_fire_event(ctx, GF_EVENT_MEDIA_LOAD_START);
- if (ctx->onloadstart) {
+ if (! JSVAL_IS_NULL(ctx->onloadstart) ) {
jsval rval;
return JS_CallFunctionValue(ctx->c, ctx->_this, ctx->onloadstart, 0, NULL, &rval);
}
xml_http_state_change(ctx);
xml_http_fire_event(ctx, GF_EVENT_LOAD);
xml_http_fire_event(ctx, GF_EVENT_MEDIA_LOAD_DONE);
- if (ctx->onload) {
+ if (! JSVAL_IS_NULL(ctx->onload)) {
jsval rval;
JS_CallFunctionValue(ctx->c, ctx->_this, ctx->onload, 0, NULL, &rval);
}
- if (ctx->onloadend) {
+ if (! JSVAL_IS_NULL(ctx->onloadend)) {
jsval rval;
JS_CallFunctionValue(ctx->c, ctx->_this, ctx->onloadend, 0, NULL, &rval);
}
ctx->readyState = XHR_READYSTATE_HEADERS_RECEIVED;
xml_http_state_change(ctx);
xml_http_fire_event(ctx, GF_EVENT_MEDIA_PROGRESS);
- if (ctx->onprogress) {
+ if (! JSVAL_IS_NULL(ctx->onprogress) ) {
jsval rval;
JS_CallFunctionValue(ctx->c, ctx->_this, ctx->onprogress, 0, NULL, &rval);
}
ctx->readyState = XHR_READYSTATE_HEADERS_RECEIVED;
xml_http_state_change(ctx);
xml_http_fire_event(ctx, GF_EVENT_MEDIA_PROGRESS);
- if (ctx->onprogress) {
+ if (! JSVAL_IS_NULL(ctx->onprogress) ) {
jsval rval;
JS_CallFunctionValue(ctx->c, ctx->_this, ctx->onprogress, 0, NULL, &rval);
}
ctx->html_status = 404;
GF_LOG(GF_LOG_ERROR, GF_LOG_SCRIPT, ("[XmlHttpRequest] cannot open local file %s\n", ctx->url));
xml_http_fire_event(ctx, GF_EVENT_ERROR);
- if (ctx->onerror) {
+ if (! JSVAL_IS_NULL(ctx->onerror) ) {
jsval rval;
JS_CallFunctionValue(ctx->c, ctx->_this, ctx->onerror, 0, NULL, &rval);
}
par.msg_type = GF_NETIO_DATA_TRANSFERED;
xml_http_on_data(ctx, &par);
+ if (!ctx->async) {
+ xml_http_terminate(ctx, GF_OK);
+ }
return GF_OK;
}
if (sess) gf_dm_sess_del(sess);
xml_http_fire_event(ctx, GF_EVENT_ABORT);
- if (ctx->onabort) {
+ if (! JSVAL_IS_NULL(ctx->onabort)) {
jsval rval;
return JS_CallFunctionValue(ctx->c, ctx->_this, ctx->onabort, 0, NULL, &rval);
}
*vp = JSVAL_VOID;
switch (SMJS_ID_TO_INT(id)) {
case XHR_ONABORT:
- if (ctx->onabort) {
+ if (! JSVAL_IS_NULL(ctx->onabort)) {
*vp = ctx->onabort;
}
return JS_TRUE;
case XHR_ONERROR:
- if (ctx->onerror) {
+ if (! JSVAL_IS_NULL(ctx->onerror)) {
*vp = ctx->onerror;
}
return JS_TRUE;
case XHR_ONLOAD:
- if (ctx->onload) {
+ if (! JSVAL_IS_NULL(ctx->onload)) {
*vp = ctx->onload;
}
return JS_TRUE;
case XHR_ONLOADSTART:
- if (ctx->onloadstart) {
+ if (! JSVAL_IS_NULL(ctx->onloadstart) ) {
*vp = ctx->onloadstart;
}
return JS_TRUE;
case XHR_ONLOADEND:
- if (ctx->onloadend) {
+ if (! JSVAL_IS_NULL(ctx->onloadend)) {
*vp = ctx->onloadend;
}
return JS_TRUE;
case XHR_ONPROGRESS:
- if (ctx->onprogress) {
+ if (! JSVAL_IS_NULL(ctx->onprogress) ) {
*vp = ctx->onprogress;
}
return JS_TRUE;
case XHR_ONREADYSTATECHANGE:
- if (ctx->onreadystatechange) {
+ if (! JSVAL_IS_NULL(ctx->onreadystatechange)) {
*vp = ctx->onreadystatechange;
}
return JS_TRUE;
case XHR_ONTIMEOUT:
- if (ctx->ontimeout) {
+ if (! JSVAL_IS_NULL(ctx->ontimeout)) {
*vp = ctx->ontimeout;
}
return JS_TRUE;
JSBool gf_set_js_eventhandler(JSContext *c, jsval vp, jsval *callbackfuncval) {
if (!callbackfuncval) return JS_FALSE;
- if (*callbackfuncval) {
+ if (! JSVAL_IS_NULL( *callbackfuncval )) {
gf_js_remove_root(c, callbackfuncval, GF_JSGC_VAL);
}
if (JSVAL_IS_VOID(vp)) {
} else if (JSVAL_IS_OBJECT(vp)) {
*callbackfuncval = vp;
}
- if (*callbackfuncval) {
+ if (! JSVAL_IS_NULL( *callbackfuncval )) {
gf_js_add_root(c, callbackfuncval, GF_JSGC_VAL);
}
return JS_TRUE;
static SMJS_FUNC_PROP_GET( storage_getProperty)
/*avoids gcc warning*/
- if (!id) id=0;
- if (!GF_JS_InstanceOf(c, obj, &dom_rt->storageClass, NULL) ) return JS_TRUE;
+#ifndef GPAC_CONFIG_DARWIN
+ if (!id) id=0;
+#endif
+ if (!GF_JS_InstanceOf(c, obj, &dom_rt->storageClass, NULL) ) return JS_TRUE;
*vp = JSVAL_VOID;
return JS_TRUE;
}
static SMJS_FUNC_PROP_SET_NOVP( storage_setProperty)
/*avoids gcc warning*/
- if (!id) id=0;
+#ifndef GPAC_CONFIG_DARWIN
+ if (!id) id=0;
+#endif
if (!GF_JS_InstanceOf(c, obj, &dom_rt->storageClass, NULL) ) return JS_TRUE;
return JS_TRUE;
}
SMJS_PROPERTY_SPEC("translation_y", EVENT_JSPROPERTY_TRANSLATIONY,JSPROP_ENUMERATE | JSPROP_PERMANENT | JSPROP_SHARED | JSPROP_READONLY, 0, 0),
SMJS_PROPERTY_SPEC("type3d", EVENT_JSPROPERTY_TYPE3D, JSPROP_ENUMERATE | JSPROP_PERMANENT | JSPROP_SHARED | JSPROP_READONLY, 0, 0),
SMJS_PROPERTY_SPEC("error", EVENT_JSPROPERTY_ERROR, JSPROP_ENUMERATE | JSPROP_PERMANENT | JSPROP_SHARED | JSPROP_READONLY, 0, 0),
+ SMJS_PROPERTY_SPEC("dynamic_scene", EVENT_JSPROPERTY_DYNAMIC_SCENE, JSPROP_ENUMERATE | JSPROP_PERMANENT | JSPROP_SHARED | JSPROP_READONLY, 0, 0),
SMJS_PROPERTY_SPEC(0, 0, 0, 0, 0),
};
GF_Node *n = (GF_Node *)SMJS_GET_PRIVATE(c, obj); \
if (!n || (n->sgprivate->tag != TAG_SVG_video && n->sgprivate->tag != TAG_SVG_audio)) \
{ \
- return JS_TRUE; \
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR); \
} \
me = (GF_HTML_MediaElement *)html_media_element_get_from_node(c, n); \
if (!me) { \
- return JS_TRUE; \
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR); \
}
#define HTML_MEDIA_JS_START HTML_MEDIA_JS_CHECK
mc->c = c;
mc->_this = JS_NewObject(c, &html_media_rt->mediaControllerClass._class, NULL, NULL);
SMJS_SET_PRIVATE(c, mc->_this, mc);
- mc->buffered.c = c;
- mc->buffered._this = JS_NewObject(c, &html_media_rt->timeRangesClass._class, NULL, mc->_this);
- SMJS_SET_PRIVATE(c, mc->buffered._this, &mc->buffered);
- mc->played.c = c;
- mc->played._this = JS_NewObject(c, &html_media_rt->timeRangesClass._class, NULL, mc->_this);
- SMJS_SET_PRIVATE(c, mc->played._this, &mc->played);
- mc->seekable.c = c;
- mc->seekable._this = JS_NewObject(c, &html_media_rt->timeRangesClass._class, NULL, mc->_this);
- SMJS_SET_PRIVATE(c, mc->seekable._this, &mc->seekable);
+ mc->buffered->c = c;
+ mc->buffered->_this = JS_NewObject(c, &html_media_rt->timeRangesClass._class, NULL, mc->_this);
+ SMJS_SET_PRIVATE(c, mc->buffered->_this, mc->buffered);
+ mc->played->c = c;
+ mc->played->_this = JS_NewObject(c, &html_media_rt->timeRangesClass._class, NULL, mc->_this);
+ SMJS_SET_PRIVATE(c, mc->played->_this, mc->played);
+ mc->seekable->c = c;
+ mc->seekable->_this = JS_NewObject(c, &html_media_rt->timeRangesClass._class, NULL, mc->_this);
+ SMJS_SET_PRIVATE(c, mc->seekable->_this, mc->seekable);
}
*/
me->textTracks._this = JS_NewObject(c, &html_media_rt->textTrackListClass._class, NULL, me->_this);
SMJS_SET_PRIVATE(c, me->textTracks._this, &me->textTracks);
- me->buffered.c = c;
- me->buffered._this = JS_NewObject(c, &html_media_rt->timeRangesClass._class, NULL, me->_this);
- SMJS_SET_PRIVATE(c, me->buffered._this, &me->buffered);
+ me->buffered->c = c;
+ me->buffered->_this = JS_NewObject(c, &html_media_rt->timeRangesClass._class, NULL, me->_this);
+ SMJS_SET_PRIVATE(c, me->buffered->_this, me->buffered);
- me->played.c = c;
- me->played._this = JS_NewObject(c, &html_media_rt->timeRangesClass._class, NULL, me->_this);
- SMJS_SET_PRIVATE(c, me->played._this, &me->played);
+ me->played->c = c;
+ me->played->_this = JS_NewObject(c, &html_media_rt->timeRangesClass._class, NULL, me->_this);
+ SMJS_SET_PRIVATE(c, me->played->_this, me->played);
- me->seekable.c = c;
- me->seekable._this = JS_NewObject(c, &html_media_rt->timeRangesClass._class, NULL, me->_this);
- SMJS_SET_PRIVATE(c, me->seekable._this, &me->seekable);
+ me->seekable->c = c;
+ me->seekable->_this = JS_NewObject(c, &html_media_rt->timeRangesClass._class, NULL, me->_this);
+ SMJS_SET_PRIVATE(c, me->seekable->_this, me->seekable);
}
-/*
- * TODO : Unused, create warnings on debian
-static void html_media_script_error(JSContext *c, const char *msg, JSErrorReport *jserr)
-{
- GF_LOG(GF_LOG_ERROR, GF_LOG_SCRIPT, ("[JavaScript] Error: %s - line %d (%s)", msg, jserr->lineno, jserr->linebuf));
-}*/
-
/* Function to browse the tracks in the MediaObject associated with the Media Element and to create appropriate HTML Track objects
*
* \param c The JavaScript Context to create the new JS object
}
}
+/* Used to retrieve the structure implementing the GF_HTML_MediaElement interface associated with this node
+ * Usually this is done with the private stack of the node (see gf_node_get_private), but in this case,
+ * the stack already contains the rendering stack SVG_video_stack.
+ * So, we store the structure implementing the GF_HTML_MediaElement interface in the JavaScript context of this node,
+ * as a non enumeratable property named 'gpac_me_impl'
+ *
+ * \param c the global JavaScript context
+ * \param n the audio or video node
+ * \return the GF_HTML_MediaElement associated with this node in the given context
+ */
+static GF_HTML_MediaElement *html_media_element_get_from_node(JSContext *c, GF_Node *n)
+{
+ jsval vp;
+ JSObject *me_obj;
+ JSObject *node_obj;
+ GF_HTML_MediaElement *me = NULL;
+
+ if ((n->sgprivate->tag == TAG_SVG_video || n->sgprivate->tag == TAG_SVG_audio) && n->sgprivate->interact && n->sgprivate->interact->js_binding) {
+ node_obj = (JSObject *)n->sgprivate->interact->js_binding->node;
+ if (node_obj) {
+ JS_GetProperty(c, node_obj, "gpac_me_impl", &vp);
+ me_obj = JSVAL_TO_OBJECT(vp);
+ me = (GF_HTML_MediaElement *)SMJS_GET_PRIVATE(c, me_obj);
+ }
+ }
+ return me;
+}
+
static JSBool SMJS_FUNCTION(html_media_load)
{
SMJS_OBJ
GF_JS_InstanceOf(c, obj, &html_media_rt->htmlVideoElementClass, NULL) ||
GF_JS_InstanceOf(c, obj, &html_media_rt->htmlMediaElementClass, NULL))
{
- /* mo->odm->net_service */
- }
- return JS_TRUE;
+ MFURL mfurl;
+ GF_Node *n = (GF_Node *)SMJS_GET_PRIVATE(c, obj);
+ GF_HTML_MediaElement *me = html_media_element_get_from_node(c, n);
+ mfurl.count = 1;
+ mfurl.vals = (SFURL *)gf_malloc(sizeof(SFURL));
+ mfurl.vals[0].url = me->currentSrc;
+ mfurl.vals[0].OD_ID = GF_MEDIA_EXTERNAL_ID;
+ gf_mo_register(n, &mfurl, GF_FALSE, GF_FALSE);
+ gf_free(mfurl.vals);
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
static JSBool SMJS_FUNCTION(html_media_canPlayType)
GF_JS_InstanceOf(c, obj, &html_media_rt->htmlVideoElementClass, NULL) ||
GF_JS_InstanceOf(c, obj, &html_media_rt->htmlMediaElementClass, NULL))
{
- }
- return JS_TRUE;
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
static JSBool SMJS_FUNCTION(html_media_fastSeek)
GF_JS_InstanceOf(c, obj, &html_media_rt->htmlVideoElementClass, NULL) ||
GF_JS_InstanceOf(c, obj, &html_media_rt->htmlMediaElementClass, NULL))
{
- }
- return JS_TRUE;
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
static JSBool SMJS_FUNCTION(html_media_addTextTrack)
{
- return JS_TRUE;
+ SMJS_OBJ
+ if (GF_JS_InstanceOf(c, obj, &html_media_rt->htmlAudioElementClass, NULL) ||
+ GF_JS_InstanceOf(c, obj, &html_media_rt->htmlVideoElementClass, NULL) ||
+ GF_JS_InstanceOf(c, obj, &html_media_rt->htmlMediaElementClass, NULL))
+ {
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
void *html_get_element_class(GF_Node *n)
}
}
-/* Used to retrieve the structure implementing the GF_HTML_MediaElement interface associated with this node
- * Usually this is done with the private stack of the node (see gf_node_get_private), but in this case,
- * the stack already contains the rendering stack SVG_video_stack.
- * So, we store the structure implementing the GF_HTML_MediaElement interface in the JavaScript context of this node,
- * as a non enumeratable property named 'gpac_me_impl'
- *
- * \param c the global JavaScript context
- * \param n the audio or video node
- * \return the GF_HTML_MediaElement associated with this node in the given context
- */
-static GF_HTML_MediaElement *html_media_element_get_from_node(JSContext *c, GF_Node *n)
-{
- jsval vp;
- JSObject *me_obj;
- JSObject *node_obj;
- GF_HTML_MediaElement *me = NULL;
-
- if ((n->sgprivate->tag == TAG_SVG_video || n->sgprivate->tag == TAG_SVG_audio) && n->sgprivate->interact && n->sgprivate->interact->js_binding) {
- node_obj = (JSObject *)n->sgprivate->interact->js_binding->node;
- if (node_obj)
- {
- JS_GetProperty(c, node_obj, "gpac_me_impl", &vp);
- me_obj = JSVAL_TO_OBJECT(vp);
- me = (GF_HTML_MediaElement *)SMJS_GET_PRIVATE(c, me_obj);
- }
- }
- return me;
-}
-
/* Creates the GF_HTML_MediaElement structure for this node
* Store it in the JavaScript context of this node, as a non enumeratable property named 'gpac_me_impl'
* see \ref html_media_element_get_from_node for retrieving it
*/
void html_media_element_js_init(JSContext *c, JSObject *node_obj, GF_Node *n)
{
- if (n->sgprivate->tag == TAG_SVG_video || n->sgprivate->tag == TAG_SVG_audio)
- {
+ if (n->sgprivate->tag == TAG_SVG_video || n->sgprivate->tag == TAG_SVG_audio) {
GF_HTML_MediaElement *me;
me = gf_html_media_element_new(n, NULL);
gf_html_media_element_init_js(me, c, node_obj);
if (GF_JS_InstanceOf(c, obj, &html_media_rt->htmlVideoElementClass, NULL) ||
GF_JS_InstanceOf(c, obj, &html_media_rt->htmlAudioElementClass, NULL) ) {
GF_Node *n = (GF_Node *)SMJS_GET_PRIVATE(c, obj);
- *target = gf_html_media_get_event_target_from_node(n);
+ *target = gf_dom_event_get_target_from_node(n);
*sg = n->sgprivate->scenegraph;
} else {
*target = NULL;
*vp = INT_TO_JSVAL(error->code);
return JS_TRUE;
}
- }
- return JS_TRUE;
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
static SMJS_FUNC_PROP_GET(html_media_get_error)
if (JSVAL_CHECK_STRING(*vp)) {
char *str = SMJS_CHARS(c, *vp);
gf_svg_set_attributeNS(n, GF_XMLNS_XLINK, "href", str);
- }
- return JS_TRUE;
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
static SMJS_FUNC_PROP_GET(html_media_get_cors)
if (JSVAL_CHECK_STRING(*vp)) {
char *str = SMJS_CHARS(c, *vp);
gf_svg_set_attributeNS(n, GF_XMLNS_SVG, "crossorigin", str);
- }
- return JS_TRUE;
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
void media_event_collect_info(GF_ClientService *net, GF_ObjectManager *odm, GF_DOMMediaEvent *media_event, u32 *min_time, u32 *min_buffer);
default:
return JS_TRUE;
}
- }
- *vp = INT_TO_JSVAL( v );
- return JS_TRUE;
+ *vp = INT_TO_JSVAL( v );
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
static SMJS_FUNC_PROP_GET(html_media_get_preload)
if (JSVAL_CHECK_STRING(*vp)) {
char *str = SMJS_CHARS(c, *vp);
gf_svg_set_attributeNS(n, GF_XMLNS_SVG, "preload", str);
- }
- return JS_TRUE;
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
static SMJS_FUNC_PROP_GET(html_media_get_buffered)
HTML_MEDIA_JS_START
- *vp = OBJECT_TO_JSVAL( me->buffered._this );
+ *vp = OBJECT_TO_JSVAL( me->buffered->_this );
return JS_TRUE;
}
double d;
GF_JSAPIParam par;
HTML_MEDIA_JS_START
- if (!JSVAL_IS_NUMBER(*vp))
- {
- return JS_TRUE;
+ if (!JSVAL_IS_NUMBER(*vp)) {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
JS_ValueToNumber(c, *vp, &d);
par.time = d;
- if (ScriptAction(n->sgprivate->scenegraph, GF_JSAPI_OP_SET_TIME, (GF_Node *)n, &par)) {
- return JS_TRUE;
- }
+ ScriptAction(n->sgprivate->scenegraph, GF_JSAPI_OP_SET_TIME, (GF_Node *)n, &par);
return JS_TRUE;
}
static SMJS_FUNC_PROP_SET(html_media_set_default_playback_rate)
jsdouble d;
HTML_MEDIA_JS_START
+ if (!JSVAL_IS_NUMBER(*vp)) {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
JS_ValueToNumber(c, *vp, &d);
me->defaultPlaybackRate = d;
return JS_TRUE;
static SMJS_FUNC_PROP_SET(html_media_set_playback_rate)
jsdouble d;
Fixed speed;
- GF_Node *n = (GF_Node *)SMJS_GET_PRIVATE(c, obj);
- GF_MediaObject *mo = gf_html_media_object(n);
+ GF_MediaObject *mo;
+ HTML_MEDIA_JS_START
+ mo = gf_html_media_object(n);
+ if (!JSVAL_IS_NUMBER(*vp)) {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
JS_ValueToNumber(c, *vp, &d);
speed = FLT2FIX(d);
gf_mo_set_speed(mo, speed);
static SMJS_FUNC_PROP_GET(html_media_get_played)
HTML_MEDIA_JS_START
- *vp =( OBJECT_TO_JSVAL( me->played._this ) );
+ *vp =( OBJECT_TO_JSVAL( me->played->_this ) );
return JS_TRUE;
}
static SMJS_FUNC_PROP_GET(html_media_get_seekable)
HTML_MEDIA_JS_START
- *vp =( OBJECT_TO_JSVAL( me->seekable._this ) );
+ *vp =( OBJECT_TO_JSVAL( me->seekable->_this ) );
return JS_TRUE;
}
static SMJS_FUNC_PROP_GET(html_media_get_ended)
- GF_Node *n = (GF_Node *)SMJS_GET_PRIVATE(c, obj);
- GF_MediaObject *mo = gf_html_media_object(n);
+ GF_MediaObject *mo;
+ HTML_MEDIA_JS_START
+ mo = gf_html_media_object(n);
*vp = BOOLEAN_TO_JSVAL( gf_mo_is_done(mo) ? JS_TRUE : JS_FALSE);
return JS_TRUE;
}
if (JSVAL_CHECK_STRING(*vp)) {
char *str = SMJS_CHARS(c, *vp);
gf_svg_set_attributeNS(n, GF_XMLNS_SVG, "autoplay", str);
- }
- return JS_TRUE;
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
static SMJS_FUNC_PROP_GET(html_media_get_loop)
char *str = SMJS_CHARS(c, *vp);
gf_svg_set_attributeNS(n, GF_XMLNS_SVG, "loop", str);
//TODO: use gf_mo_get_loop
- }
- return JS_TRUE;
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
static SMJS_FUNC_PROP_GET(html_media_get_mediagroup)
if (JSVAL_CHECK_STRING(*vp)) {
char *str = SMJS_CHARS(c, *vp);
gf_svg_set_attributeNS(n, GF_XMLNS_SVG, "mediagroup", str);
- }
- return JS_TRUE;
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
static SMJS_FUNC_PROP_GET(html_media_get_controller)
static SMJS_FUNC_PROP_SET(html_media_set_controller)
HTML_MEDIA_JS_START
- me->controller = (GF_HTML_MediaController *)SMJS_GET_PRIVATE(c, JSVAL_TO_OBJECT(*vp));
- return JS_TRUE;
+ if (JSVAL_IS_OBJECT(*vp)) {
+ me->controller = (GF_HTML_MediaController *)SMJS_GET_PRIVATE(c, JSVAL_TO_OBJECT(*vp));
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
static SMJS_FUNC_PROP_GET(html_media_get_controls)
if (JSVAL_CHECK_STRING(*vp)) {
char *str = SMJS_CHARS(c, *vp);
gf_svg_set_attributeNS(n, GF_XMLNS_SVG, "controls", str);
- }
- return JS_TRUE;
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
static SVG_audio_stack *html_media_get_audio_stack(GF_Node *n) {
if (audio_stack) {
JS_ValueToNumber(c, *vp, &volume);
audio_stack->input.intensity = FLT2FIX(volume);
- }
- return JS_TRUE;
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
static SMJS_FUNC_PROP_GET(html_media_get_muted)
audio_stack = html_media_get_audio_stack(n);
if (audio_stack) {
audio_stack->input.is_muted = (JSVAL_TO_BOOLEAN(*vp) == JS_TRUE ? GF_TRUE : GF_FALSE);
- }
- return JS_TRUE;
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
static SMJS_FUNC_PROP_GET(html_media_get_default_muted)
if (JSVAL_CHECK_STRING(*vp)) {
char *str = SMJS_CHARS(c, *vp);
gf_svg_set_attributeNS(n, GF_XMLNS_SVG, "muted", str);
- }
- return JS_TRUE;
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
static SMJS_FUNC_PROP_GET(html_media_get_audio_tracks)
}
static SMJS_FUNC_PROP_GET(html_time_ranges_get_length)
- GF_HTML_MediaTimeRanges *timeranges = (GF_HTML_MediaTimeRanges *)SMJS_GET_PRIVATE(c, obj);
+ GF_HTML_MediaTimeRanges *timeranges;
+ if (!GF_JS_InstanceOf(c, obj, &html_media_rt->timeRangesClass, NULL)) {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
+ timeranges = (GF_HTML_MediaTimeRanges *)SMJS_GET_PRIVATE(c, obj);
*vp = INT_TO_JSVAL( gf_list_count(timeranges->times)/2);
return JS_TRUE;
}
SMJS_OBJ
SMJS_ARGS
if ((argc!=1) || !GF_JS_InstanceOf(c, obj, &html_media_rt->timeRangesClass, NULL)) {
- return JS_TRUE;
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
timeranges = (GF_HTML_MediaTimeRanges *)SMJS_GET_PRIVATE(c, obj);
if (!timeranges) {
- return JS_TRUE;
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
if (JSVAL_IS_INT(argv[0])) {
u32 i = JSVAL_TO_INT(argv[0]);
- double *start_value = (double *)gf_list_get(timeranges->times, 2*i);
+ u64 *start_value = (u64 *)gf_list_get(timeranges->times, 2*i);
if (!start_value) {
- dom_throw_exception(c, GF_DOM_EXC_WRONG_DOCUMENT_ERR);
- return JS_FALSE;
+ return dom_throw_exception(c, GF_DOM_EXC_INDEX_SIZE_ERR);
} else {
- SMJS_SET_RVAL(DOUBLE_TO_JSVAL(JS_NewDouble(c, *start_value)));
+ SMJS_SET_RVAL(DOUBLE_TO_JSVAL(JS_NewDouble(c, (*start_value)*1.0/timeranges->timescale)));
}
- }
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
return JS_TRUE;
}
SMJS_OBJ
SMJS_ARGS
if ((argc!=1) || !GF_JS_InstanceOf(c, obj, &html_media_rt->timeRangesClass, NULL)) {
- return JS_TRUE;
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
timeranges = (GF_HTML_MediaTimeRanges *)SMJS_GET_PRIVATE(c, obj);
if (!timeranges) {
- return JS_TRUE;
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
if (JSVAL_IS_INT(argv[0])) {
u32 i = JSVAL_TO_INT(argv[0]);
- double *start_value = (double *)gf_list_get(timeranges->times, 2*i+1);
- if (!start_value) {
- dom_throw_exception(c, GF_DOM_EXC_WRONG_DOCUMENT_ERR);
- return JS_FALSE;
+ u64 *end_value = (u64 *)gf_list_get(timeranges->times, 2*i+1);
+ if (!end_value) {
+ return dom_throw_exception(c, GF_DOM_EXC_INDEX_SIZE_ERR);
} else {
- SMJS_SET_RVAL(DOUBLE_TO_JSVAL(JS_NewDouble(c, *start_value)));
+ SMJS_SET_RVAL(DOUBLE_TO_JSVAL(JS_NewDouble(c, (*end_value)*1.0/timeranges->timescale)));
}
- }
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
return JS_TRUE;
}
GF_HTML_TrackList *tracklist;
if (html_is_track_list(c, obj)) {
tracklist = (GF_HTML_TrackList *)SMJS_GET_PRIVATE(c, obj);
- *vp = INT_TO_JSVAL( gf_list_count(tracklist->tracks) );
+ if (tracklist) {
+ *vp = INT_TO_JSVAL( gf_list_count(tracklist->tracks) );
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
- return JS_TRUE;
}
static JSBool SMJS_FUNCTION(html_track_list_get_track_by_id)
SMJS_FREE(c, str);
if (track) {
SMJS_SET_RVAL(OBJECT_TO_JSVAL(track->_this));
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
- return JS_TRUE;
}
static SMJS_FUNC_PROP_GET(html_track_list_get_property)
GF_HTML_Track *track = (GF_HTML_Track *)gf_list_get(tracklist->tracks, (u32)index);
*vp = OBJECT_TO_JSVAL(track->_this);
}
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
- return JS_TRUE;
}
static SMJS_FUNC_PROP_GET(html_track_list_get_selected_index)
if (html_is_track_list(c, obj)) {
GF_HTML_TrackList *tracklist = (GF_HTML_TrackList *)SMJS_GET_PRIVATE(c, obj);
*vp = INT_TO_JSVAL(tracklist->selected_index);
- }
- return JS_TRUE;
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
static SMJS_FUNC_PROP_GET(html_track_list_get_onchange)
if (html_is_track_list(c, obj)) {
GF_HTML_TrackList *tracklist = (GF_HTML_TrackList *)SMJS_GET_PRIVATE(c, obj);
- if (tracklist->onchange) {
+ if (! JSVAL_IS_NULL(tracklist->onchange)) {
*vp = tracklist->onchange;
} else {
*vp = JSVAL_NULL;
}
- }
- return JS_TRUE;
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
JSBool gf_set_js_eventhandler(JSContext *c, jsval vp, jsval *callbackfuncval);
if (html_is_track_list(c, obj)) {
GF_HTML_TrackList *tracklist = (GF_HTML_TrackList *)SMJS_GET_PRIVATE(c, obj);
gf_set_js_eventhandler(c, *vp, &tracklist->onchange);
- }
- return JS_TRUE;
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
static SMJS_FUNC_PROP_GET(html_track_list_get_onaddtrack)
if (html_is_track_list(c, obj)) {
GF_HTML_TrackList *tracklist = (GF_HTML_TrackList *)SMJS_GET_PRIVATE(c, obj);
- if (tracklist->onaddtrack) {
+ if (! JSVAL_IS_NULL(tracklist->onaddtrack)) {
*vp = tracklist->onaddtrack;
} else {
*vp = JSVAL_NULL;
}
- }
- return JS_TRUE;
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
static SMJS_FUNC_PROP_SET(html_track_list_set_onaddtrack)
if (html_is_track_list(c, obj)) {
GF_HTML_TrackList *tracklist = (GF_HTML_TrackList *)SMJS_GET_PRIVATE(c, obj);
gf_set_js_eventhandler(c, *vp, &tracklist->onaddtrack);
- }
- return JS_TRUE;
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
static SMJS_FUNC_PROP_GET(html_track_list_get_onremovetrack)
if (html_is_track_list(c, obj)) {
GF_HTML_TrackList *tracklist = (GF_HTML_TrackList *)SMJS_GET_PRIVATE(c, obj);
- if (tracklist->onremovetrack) {
+ if (! JSVAL_IS_NULL(tracklist->onremovetrack) ) {
*vp = tracklist->onremovetrack;
} else {
*vp = JSVAL_NULL;
}
- }
- return JS_TRUE;
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
static SMJS_FUNC_PROP_SET(html_track_list_set_onremovetrack)
if (html_is_track_list(c, obj)) {
GF_HTML_TrackList *tracklist = (GF_HTML_TrackList *)SMJS_GET_PRIVATE(c, obj);
gf_set_js_eventhandler(c, *vp, &tracklist->onremovetrack);
- }
- return JS_TRUE;
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
static SMJS_FUNC_PROP_GET(html_track_get_property)
if (html_is_track_list(c, obj)) {
GF_HTML_Track *track = (GF_HTML_Track *)SMJS_GET_PRIVATE(c, obj);
if (!SMJS_ID_IS_INT(id)) {
- return JS_TRUE;
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
switch (SMJS_ID_TO_INT(id)) {
case HTML_TRACK_PROP_ID:
}
return JS_TRUE;
}
- }
- return JS_TRUE;
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
static SMJS_FUNC_PROP_SET(html_track_set_property)
if (html_is_track_list(c, obj)) {
GF_HTML_Track *track = (GF_HTML_Track *)SMJS_GET_PRIVATE(c, obj);
if (!SMJS_ID_IS_INT(id)) {
- return JS_TRUE;
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
switch (SMJS_ID_TO_INT(id)) {
case HTML_TRACK_PROP_SELECTED:
}
return JS_TRUE;
}
- }
- return JS_TRUE;
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
static SMJS_FUNC_PROP_GET(html_video_get_property)
video = (SVG_video_stack *)n->sgprivate->UserPrivate;
if (!SMJS_ID_IS_INT(id)) {
- return JS_TRUE;
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
switch (SMJS_ID_TO_INT(id)) {
case HTML_VIDEO_PROP_WIDTH:
}
return JS_TRUE;
}
- }
- return JS_TRUE;
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
static SMJS_FUNC_PROP_SET(html_video_set_property)
{
GF_Node *n = (GF_Node *)SMJS_GET_PRIVATE(c, obj);
if (!SMJS_ID_IS_INT(id)) {
- return JS_TRUE;
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
switch (SMJS_ID_TO_INT(id)) {
case HTML_VIDEO_PROP_WIDTH:
return JS_TRUE;
}
}
- }
- return JS_TRUE;
+ return JS_TRUE;
+ } else {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
}
static JSBool SMJS_FUNCTION(html_media_event_add_listener)
sb->_this = JS_NewObject(c, &html_media_rt->sourceBufferClass._class, 0, 0);
//gf_js_add_root(c, &sb->_this, GF_JSGC_OBJECT);
SMJS_SET_PRIVATE(c, sb->_this, sb);
- sb->buffered._this = JS_NewObject(c, &html_media_rt->timeRangesClass._class, NULL, sb->_this);
- SMJS_SET_PRIVATE(c, sb->buffered._this, &sb->buffered);
+ sb->buffered->_this = JS_NewObject(c, &html_media_rt->timeRangesClass._class, NULL, sb->_this);
+ SMJS_SET_PRIVATE(c, sb->buffered->_this, sb->buffered);
}
#include <gpac/internal/terminal_dev.h>
SMJS_ARGS
GF_SceneGraph *sg;
GF_JSAPIParam par;
- Bool isSupported;
+ Bool isSupported = GF_TRUE;
char *mime;
- if (!argc || !JSVAL_CHECK_STRING(argv[0]) )
- {
- return JS_TRUE;
+ if (!argc || !JSVAL_CHECK_STRING(argv[0])) {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
mime = SMJS_CHARS(c, argv[0]);
sg = mediasource_get_scenegraph(c);
- sg->script_action(sg->script_action_cbck, GF_JSAPI_OP_GET_TERM, NULL, &par);
- isSupported = gf_term_is_type_supported((GF_Terminal *)par.term, mime);
+ assert(sg);
+ if (!strlen(mime)) {
+ isSupported = GF_FALSE;
+ } else {
+ sg->script_action(sg->script_action_cbck, GF_JSAPI_OP_GET_TERM, NULL, &par);
+ isSupported = gf_term_is_type_supported((GF_Terminal *)par.term, mime);
+ }
SMJS_SET_RVAL(BOOLEAN_TO_JSVAL(isSupported ? JS_TRUE : JS_FALSE));
SMJS_FREE(c, mime);
return JS_TRUE;
GF_HTML_MediaSource *ms;
const char *mime;
GF_Err e;
+ u32 exception = 0;
e = GF_OK;
if (!GF_JS_InstanceOf(c, obj, &html_media_rt->mediaSourceClass, NULL) ) {
- return JS_TRUE;
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
if (!argc || !JSVAL_CHECK_STRING(argv[0]))
{
return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
mime = SMJS_CHARS(c, argv[0]);
- if (!strlen(mime))
- {
- return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ if (!strlen(mime)) {
+ exception = GF_DOM_EXC_INVALID_ACCESS_ERR;
+ goto exit;
}
ms = (GF_HTML_MediaSource *)SMJS_GET_PRIVATE(c, obj);
- if (ms->readyState != MEDIA_SOURCE_READYSTATE_OPEN)
- {
- dom_throw_exception(c, GF_DOM_EXC_INVALID_STATE_ERR);
- e = GF_BAD_PARAM;
+ if (!ms) {
+ exception = GF_DOM_EXC_INVALID_ACCESS_ERR;
+ goto exit;
+ } else if (ms->readyState != MEDIA_SOURCE_READYSTATE_OPEN) {
+ exception = GF_DOM_EXC_INVALID_STATE_ERR;
goto exit;
}
assert(ms->service);
}
*/
sb = gf_mse_source_buffer_new(ms);
+ assert(sb);
e = gf_mse_source_buffer_load_parser(sb, mime);
if (e == GF_OK) {
- gf_mse_add_source_buffer(ms, sb);
+ gf_mse_mediasource_add_source_buffer(ms, sb);
mediasource_sourceBuffer_initjs(c, obj, sb);
SMJS_SET_RVAL( OBJECT_TO_JSVAL(sb->_this) );
} else {
gf_mse_source_buffer_del(sb);
- dom_throw_exception(c, GF_DOM_EXC_NOT_SUPPORTED_ERR);
+ exception = GF_DOM_EXC_NOT_SUPPORTED_ERR;
}
exit:
- SMJS_FREE(c, (void *)mime);
- if (e == GF_OK) {
- return JS_TRUE;
+ if (mime) {
+ SMJS_FREE(c, (void *)mime);
+ }
+ if (exception) {
+ return dom_throw_exception(c, exception);
} else {
- return JS_FALSE;
+ return JS_TRUE;
}
}
static JSBool SMJS_FUNCTION(mediasource_removeSourceBuffer)
{
SMJS_OBJ
-// SMJS_ARGS
-// GF_HTML_MediaSource *ms;
+ SMJS_ARGS
+ GF_HTML_MediaSource *ms;
+ GF_HTML_SourceBuffer *sb;
+ JSObject *sb_obj;
if (!GF_JS_InstanceOf(c, obj, &html_media_rt->mediaSourceClass, NULL) ) {
- return JS_TRUE;
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
-// ms = (GF_HTML_MediaSource *)SMJS_GET_PRIVATE(c, obj);
- /* TODO */
+ ms = (GF_HTML_MediaSource *)SMJS_GET_PRIVATE(c, obj);
+ if (!ms) {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
+ if (!argc || JSVAL_IS_NULL(argv[0]) || !JSVAL_IS_OBJECT(argv[0])) {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
+ sb_obj = JSVAL_TO_OBJECT(argv[0]);
+ if (!GF_JS_InstanceOf(c, sb_obj, &html_media_rt->sourceBufferClass, NULL) ) {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
+ sb = (GF_HTML_SourceBuffer *)SMJS_GET_PRIVATE(c, sb_obj);
+ if (!sb) {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ } else {
+ GF_Err e = gf_mse_remove_source_buffer(ms, sb);
+ if (e == GF_NOT_FOUND) {
+ return dom_throw_exception(c, GF_DOM_EXC_NOT_FOUND_ERR);
+ }
+ }
return JS_TRUE;
}
GF_HTML_MediaSource *ms;
u32 i;
if (!GF_JS_InstanceOf(c, obj, &html_media_rt->mediaSourceClass, NULL) ) {
- return JS_TRUE;
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
ms = (GF_HTML_MediaSource *)SMJS_GET_PRIVATE(c, obj);
+ if (!ms) {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
if (ms->readyState != MEDIA_SOURCE_READYSTATE_OPEN) {
- dom_throw_exception(c, GF_DOM_EXC_INVALID_STATE_ERR);
- return JS_FALSE;
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_STATE_ERR);
}
for (i = 0; i < gf_list_count(ms->sourceBuffers.list); i++) {
GF_HTML_SourceBuffer *sb = (GF_HTML_SourceBuffer *)gf_list_get(ms->sourceBuffers.list, i);
if (sb->updating) {
- dom_throw_exception(c, GF_DOM_EXC_INVALID_STATE_ERR);
- return JS_FALSE;
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_STATE_ERR);
}
}
- if (argc > 0)
- {
+ if (argc > 0) {
char *error = NULL;
if (!JSVAL_CHECK_STRING(argv[0])) {
- dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
- return JS_FALSE;
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
error = SMJS_CHARS(c, argv[0]);
if (strcmp(error, "decode") && strcmp(error, "network")) {
SMJS_FREE(c, error);
- dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
- return JS_FALSE;
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
- SMJS_FREE(c, error);
+ SMJS_FREE(c, error);
}
gf_mse_mediasource_end(ms);
return JS_TRUE;
static SMJS_FUNC_PROP_GET(media_source_get_source_buffers)
GF_HTML_MediaSource *p;
if (!GF_JS_InstanceOf(c, obj, &html_media_rt->mediaSourceClass, NULL) ) {
- return JS_TRUE;
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
p = (GF_HTML_MediaSource *)SMJS_GET_PRIVATE(c, obj);
- if (p) {
+ if (!p) {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ } else {
*vp = OBJECT_TO_JSVAL(p->sourceBuffers._this);
return JS_TRUE;
}
static SMJS_FUNC_PROP_GET(media_source_get_ready_state)
GF_HTML_MediaSource *p;
if (!GF_JS_InstanceOf(c, obj, &html_media_rt->mediaSourceClass, NULL) ) {
- return JS_TRUE;
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
p = (GF_HTML_MediaSource *)SMJS_GET_PRIVATE(c, obj);
- if (p) {
+ if (!p) {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ } else {
switch (p->readyState)
{
case MEDIA_SOURCE_READYSTATE_CLOSED:
*vp = STRING_TO_JSVAL( JS_NewStringCopyZ(c, "ended"));
break;
}
- return JS_TRUE;
}
return JS_TRUE;
}
}
static SMJS_FUNC_PROP_SET(media_source_set_duration)
+ GF_HTML_MediaSource *ms;
+ if (!GF_JS_InstanceOf(c, obj, &html_media_rt->mediaSourceClass, NULL)) {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
+ ms = (GF_HTML_MediaSource *)SMJS_GET_PRIVATE(c, obj);
+ if (!ms) {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ } else {
+ if (ms->readyState != MEDIA_SOURCE_READYSTATE_OPEN) {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_STATE_ERR);
+ } else if (!JSVAL_IS_NUMBER(*vp)) {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ } else {
+ u32 i, count;
+ count = gf_list_count(ms->sourceBuffers.list);
+ for (i = 0; i < count; i++) {
+ GF_HTML_SourceBuffer *sb = (GF_HTML_SourceBuffer *)gf_list_get(ms->sourceBuffers.list, i);
+ if (sb->updating) {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_STATE_ERR);
+ }
+ }
+ {
+ jsdouble durationValue;
+ JS_ValueToNumber(c, *vp, &durationValue);
+ if (durationValue < 0) {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ } else {
+ ms->duration = durationValue;
+ ms->durationType = DURATION_VALUE;
+ /* TODO: call the run duration algorithm */
+ }
+ }
+ }
+ }
return JS_TRUE;
}
static SMJS_FUNC_PROP_GET( sourcebufferlist_getProperty)
GF_HTML_SourceBufferList *p;
u32 count;
- u32 idx;
+ s32 idx;
if (!GF_JS_InstanceOf(c, obj, &html_media_rt->sourceBufferListClass, NULL) )
{
return JS_TRUE;
GF_HTML_SourceBuffer *sb; \
if (!GF_JS_InstanceOf(c, obj, &html_media_rt->sourceBufferClass, NULL) ) \
{ \
- return dom_throw_exception(c, GF_DOM_EXC_TYPE_MISMATCH_ERR); \
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR); \
}\
sb = (GF_HTML_SourceBuffer *)SMJS_GET_PRIVATE(c, obj);\
- /* check if this source buffer is still in the list of source buffers */\
- if (!sb || gf_list_find(sb->mediasource->sourceBuffers.list, sb) < 0)\
+ if (!sb)\
{\
- return dom_throw_exception(c, GF_DOM_EXC_INVALID_STATE_ERR); \
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR); \
}
#define SB_UPDATING_CHECK \
SB_BASIC_CHECK \
+ /* check if this source buffer is still in the list of source buffers */\
+ if (gf_list_find(sb->mediasource->sourceBuffers.list, sb) < 0)\
+ {\
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_STATE_ERR); \
+ } \
if (sb->updating)\
{\
return dom_throw_exception(c, GF_DOM_EXC_INVALID_STATE_ERR); \
- }\
+ }
/* FIXME : Function not used, generates warning on debian
static DECL_FINALIZE(sourcebuffer_finalize)
gf_mse_mediasource_open(sb->mediasource, NULL);
}
- if (!argc || JSVAL_IS_NULL(argv[0]) || !JSVAL_IS_OBJECT(argv[0]))
- {
+ if (!argc || JSVAL_IS_NULL(argv[0]) || !JSVAL_IS_OBJECT(argv[0])) {
return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
js_ab = JSVAL_TO_OBJECT(argv[0]);
static JSBool SMJS_FUNCTION(sourcebuffer_abort)
{
SMJS_OBJ
-// SMJS_ARGS
SB_BASIC_CHECK
+ if (gf_list_find(sb->mediasource->sourceBuffers.list, sb) < 0) {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_STATE_ERR);
+ }
if (sb->mediasource->readyState != MEDIA_SOURCE_READYSTATE_OPEN) {
- return JS_TRUE;
- }
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_STATE_ERR);
+ }
if (gf_mse_source_buffer_abort(sb) != GF_OK) {
return JS_TRUE;
}
jsdouble start, end;
SB_UPDATING_CHECK
if (argc < 2 || !JSVAL_IS_NUMBER(argv[0]) || !JSVAL_IS_NUMBER(argv[1])) {
- return JS_TRUE;
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
JS_ValueToNumber(c, argv[0], &start);
JS_ValueToNumber(c, argv[1], &end);
if (start < 0 /* || start > sb->duration */ || start >= end) {
- return JS_TRUE;
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
if (sb->mediasource->readyState != MEDIA_SOURCE_READYSTATE_OPEN) {
- return JS_TRUE;
- }
- sb->updating = GF_TRUE;
- if (!sb->remove_thread) {
- sb->remove_thread = gf_th_new(NULL);
+ gf_mse_mediasource_open(sb->mediasource, NULL);
}
- gf_th_run(sb->remove_thread, gf_mse_source_buffer_remove, sb);
+ gf_mse_remove(sb, start, end);
return JS_TRUE;
}
static SMJS_FUNC_PROP_SET(sourceBuffer_set_mode)
char *smode = NULL;
+ GF_HTML_MediaSource_AppendMode mode;
SB_BASIC_CHECK
if (!JSVAL_CHECK_STRING(*vp)) {
- return JS_TRUE;
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
smode = SMJS_CHARS(c, *vp);
if (stricmp(smode, "segments") && stricmp(smode, "sequence")) {
- return JS_TRUE;
- }
- if (sb->updating) {
- return JS_TRUE;
+ SMJS_FREE(c, smode);
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
+ if (!stricmp(smode, "segments")) {
+ mode = MEDIA_SOURCE_APPEND_MODE_SEGMENTS;
+ } else if (!stricmp(smode, "sequence")) {
+ mode = MEDIA_SOURCE_APPEND_MODE_SEQUENCE;
+ }
+ SMJS_FREE(c, smode);
+ if (gf_list_find(sb->mediasource->sourceBuffers.list, sb) < 0) {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_STATE_ERR);
+ }
+ if (sb->updating) {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_STATE_ERR);
+ }
if (sb->mediasource->readyState == MEDIA_SOURCE_READYSTATE_ENDED) {
gf_mse_mediasource_open(sb->mediasource, NULL);
}
if (sb->append_state == MEDIA_SOURCE_APPEND_STATE_PARSING_MEDIA_SEGMENT) {
- return JS_TRUE;
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_STATE_ERR);
}
- if (!stricmp(smode, "segments")) {
- sb->append_mode = MEDIA_SOURCE_APPEND_MODE_SEGMENTS;
- } else if (!stricmp(smode, "sequence")) {
- sb->append_mode = MEDIA_SOURCE_APPEND_MODE_SEQUENCE;
- }
+ sb->append_mode = mode;
if (sb->append_mode == MEDIA_SOURCE_APPEND_MODE_SEQUENCE) {
- /* TODO */
+ sb->group_start_timestamp_flag = GF_TRUE;
+ sb->group_start_timestamp = sb->group_end_timestamp;
}
- SMJS_FREE(c, smode);
return JS_TRUE;
}
static SMJS_FUNC_PROP_GET(sourceBuffer_get_timestampOffset)
SB_BASIC_CHECK
- *vp = DOUBLE_TO_JSVAL(JS_NewDouble(c, sb->timestampOffset));
+ *vp = DOUBLE_TO_JSVAL(JS_NewDouble(c, sb->timestampOffset*1.0/sb->timescale));
return JS_TRUE;
}
static SMJS_FUNC_PROP_SET(sourceBuffer_set_timestampOffset)
jsdouble d;
- SB_BASIC_CHECK
+ SB_UPDATING_CHECK
+ if (sb->mediasource->readyState == MEDIA_SOURCE_READYSTATE_ENDED) {
+ gf_mse_mediasource_open(sb->mediasource, NULL);
+ }
+ if (sb->append_state == MEDIA_SOURCE_APPEND_STATE_PARSING_MEDIA_SEGMENT) {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_STATE_ERR);
+ }
JS_ValueToNumber(c, *vp, &d);
- sb->timestampOffset = d;
+ gf_mse_source_buffer_set_timestampOffset(sb, d);
return JS_TRUE;
}
static SMJS_FUNC_PROP_SET(sourceBuffer_set_timescale)
SB_BASIC_CHECK
- sb->timescale = JSVAL_TO_INT(*vp);
+ gf_mse_source_buffer_set_timescale(sb, JSVAL_TO_INT(*vp));
return JS_TRUE;
}
static SMJS_FUNC_PROP_GET(sourceBuffer_get_appendWindowStart)
- SB_UPDATING_CHECK
+ SB_BASIC_CHECK
*vp = DOUBLE_TO_JSVAL(JS_NewDouble(c, sb->appendWindowStart));
return JS_TRUE;
}
static SMJS_FUNC_PROP_SET(sourceBuffer_set_appendWindowStart)
jsdouble d;
SB_UPDATING_CHECK
+ if (!JSVAL_IS_NUMBER(*vp)) {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
JS_ValueToNumber(c, *vp, &d);
if (d < 0 || d >= sb->appendWindowEnd) {
- return JS_TRUE;
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
sb->appendWindowStart = d;
return JS_TRUE;
}
static SMJS_FUNC_PROP_GET(sourceBuffer_get_appendWindowEnd)
- SB_UPDATING_CHECK
- *vp = DOUBLE_TO_JSVAL(JS_NewDouble(c, sb->appendWindowEnd));
+ SB_BASIC_CHECK
+ if (sb->appendWindowEnd == GF_MAX_DOUBLE) {
+ *vp = JS_GetPositiveInfinityValue(c);
+ } else {
+ *vp = DOUBLE_TO_JSVAL(JS_NewDouble(c, sb->appendWindowEnd));
+ }
return JS_TRUE;
}
static SMJS_FUNC_PROP_SET(sourceBuffer_set_appendWindowEnd)
jsdouble d;
SB_UPDATING_CHECK
+ if (!JSVAL_IS_NUMBER(*vp)) {
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
+ }
JS_ValueToNumber(c, *vp, &d);
if (d <= sb->appendWindowStart) {
- return JS_TRUE;
+ return dom_throw_exception(c, GF_DOM_EXC_INVALID_ACCESS_ERR);
}
sb->appendWindowEnd = d;
return JS_TRUE;
static SMJS_FUNC_PROP_GET(sourceBuffer_get_buffered)
SB_BASIC_CHECK
gf_mse_source_buffer_update_buffered(sb);
- *vp = OBJECT_TO_JSVAL(sb->buffered._this);
+ *vp = OBJECT_TO_JSVAL(sb->buffered->_this);
return JS_TRUE;
}
static SMJS_FUNC_PROP_GET(sourceBuffer_get_tracks)
SB_BASIC_CHECK
+ /* TODO */
return JS_TRUE;
}
GF_KeyCode gf_dom_get_key_type(char *key_name)
{
if (strlen(key_name) == 1) {
- unsigned char c[2];
+ char c[2];
c[0] = key_name[0];
c[1] = 0;
strupr(c);
if (c[0] >= '0' && c[0] <= '9')
return ( GF_KEY_0 + (c[0] - '0') );
- switch (c[0]) {
+ switch ((u8) c[0]) {
case '@': return GF_KEY_AT;
case '*': return GF_KEY_STAR;
case '#': return GF_KEY_NUMBER;
if (!cx) JS_RemoveValueRootRT(js_rt->js_runtime, rp);
else
#endif
- JS_RemoveValueRoot(cx, rp);
+ JS_RemoveValueRoot(cx, (jsval *) rp);
break;
default:
if (cx) JS_RemoveGCThingRoot(cx, rp);
GF_JSField *ptr = (GF_JSField *) SMJS_GET_PRIVATE(c, obj);
if (!JSVAL_IS_INT(*vp) || JSVAL_TO_INT(*vp) < 0) return JS_FALSE;
/*avoids gcc warning*/
+#ifndef GPAC_CONFIG_DARWIN
if (!id) id=0;
- len = JSVAL_TO_INT(*vp);
+#endif
+ len = JSVAL_TO_INT(*vp);
if (!len) {
jsuint len;
GF_JSField *ptr = (GF_JSField *) SMJS_GET_PRIVATE(c, obj);
/*avoids gcc warning*/
+#ifndef GPAC_CONFIG_DARWIN
if (!id) id=0;
-
+#endif
if (ptr->field.fieldType==GF_SG_VRML_MFNODE) {
len = gf_node_list_get_count(*(GF_ChildNodeItem **)ptr->field.far_ptr);
ret = JS_TRUE;
{
ch->pck_sn = 0;
- /*if using RAP signal and codec not resilient, wait for rap. If RAP isn't signaled DON'T wait for it :)*/
- if (!ch->codec_resilient)
+ /*if using RAP signal and codec not resilient, wait for rap. If RAP isn't signaled, this will be ignored*/
+ if (ch->codec_resilient != GF_CODEC_RESILIENT_ALWAYS)
ch->stream_state = 2;
if (ch->buffer) gf_free(ch->buffer);
ch->buffer = NULL;
if (ch->BufferTime < (s32) ch->MaxBuffer) {
/*check last AU time*/
u32 now = gf_term_get_time(ch->odm->term);
- /*if more than half sec since last AU don't buffer and prevent rebuffering on short streams
- this will also work for channels ignoring timing*/
- if (now>ch->last_au_time + MAX(ch->BufferTime, 500) ) {
+ /*if more than MaxBuffer sec since last AU don't buffer and prevent rebuffering on short streams
+ this will also work for channels ignoring timing
+ we use MaxBuffer as some transport protocols (HTTP streaming, DVB-H) will work in burst modes of MaxBuffer
+ */
+ if (now > ch->last_au_time + 2*ch->MaxBuffer ) {
/*this can be safely seen as a stream with very few updates (likely only one)*/
- if (!ch->AU_buffer_first && ch->first_au_fetched) ch->MinBuffer = 0;
+ if (!ch->AU_buffer_first && ch->first_au_fetched)
+ ch->MinBuffer = 0;
return 0;
}
return 1;
ch->au_duration = 0;
if (duration) ch->au_duration = (u32) ((u64)1000 * duration / ch->ts_res);
- GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ES%d - Dispatch AU DTS %d - CTS %d - RAP %d - size %d time %d Buffer %d Nb AUs %d - First AU relative timing %d\n", ch->esd->ESID, au->DTS, au->CTS, au->flags&1, au->dataLength, gf_clock_real_time(ch->clock), ch->BufferTime, ch->AU_Count, ch->AU_buffer_first ? ch->AU_buffer_first->DTS - gf_clock_time(ch->clock) : 0 ));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ES%d (%s) - Dispatch AU DTS %d - CTS %d - RAP %d - size %d time %d Buffer %d Nb AUs %d - First AU relative timing %d\n", ch->esd->ESID, ch->odm->net_service->url, au->DTS, au->CTS, au->flags&1, au->dataLength, gf_clock_real_time(ch->clock), ch->BufferTime, ch->AU_Count, ch->AU_buffer_first ? ch->AU_buffer_first->DTS - gf_clock_time(ch->clock) : 0 ));
/*little optimisation: if direct dispatching is possible, try to decode the AU
we must lock the media scheduler to avoid deadlocks with other codecs accessing the scene or
/*get RAP*/
if (ch->esd->slConfig->hasRandomAccessUnitsOnlyFlag) {
hdr.randomAccessPointFlag = 1;
- } else if ((ch->carousel_type!=GF_ESM_CAROUSEL_MPEG2) && (!ch->esd->slConfig->useRandomAccessPointFlag || ch->codec_resilient) ) {
+ } else if ((ch->carousel_type!=GF_ESM_CAROUSEL_MPEG2) && (!ch->esd->slConfig->useRandomAccessPointFlag || (ch->codec_resilient==GF_CODEC_RESILIENT_ALWAYS) ) ) {
ch->stream_state = 0;
}
init_ts = 1;
}
+
+ /*if we had a previous buffer, add or discard it, depending on codec resilience*/
+ if (hdr.accessUnitStartFlag && ch->buffer) {
+ if (ch->esd->slConfig->useAccessUnitEndFlag) {
+ GF_LOG(GF_LOG_WARNING, GF_LOG_SYNC, ("[SyncLayer] ES%d: missed end of AU (DTS %d)\n", ch->esd->ESID, ch->DTS));
+ }
+ if (ch->codec_resilient) {
+ if (!ch->IsClockInit && !ch->skip_time_check_for_pending) gf_es_check_timing(ch);
+ Channel_DispatchAU(ch, 0);
+ } else {
+ gf_free(ch->buffer);
+ ch->buffer = NULL;
+ ch->AULength = 0;
+ ch->len = ch->allocSize = 0;
+ }
+ }
+
if (init_ts) {
/*Get CTS */
if (ch->esd->slConfig->useTimestampsFlag) {
/*until clock is not init check seed ts*/
if (!ch->IsClockInit && (ch->net_dts < ch->seed_ts))
ch->seed_ts = ch->net_dts;
-#endif
-
+#endif
if (ch->net_cts<ch->seed_ts) {
u64 diff = ch->seed_ts - ch->net_cts;
ch->DTS = (u32) (ch->ts_offset + (s64) (ch->net_dts) * 1000 / ch->ts_res);
}
+ if (ch->odm->parentscene && ch->odm->parentscene->root_od->addon) {
+ ch->DTS = (u32) gf_scene_adjust_timestamp_for_addon(ch->odm->parentscene, ch->DTS, ch->odm->parentscene->root_od->addon);
+ ch->CTS = (u32) gf_scene_adjust_timestamp_for_addon(ch->odm->parentscene, ch->CTS, ch->odm->parentscene->root_od->addon);
+ }
+
if (ch->clock->probe_ocr && gf_es_owns_clock(ch)) {
s32 diff_ts = ch->DTS;
diff_ts -= ch->clock->init_time;
}
}
-
- /*if we had a previous buffer, add or discard it, depending on codec resilience*/
- if (hdr.accessUnitStartFlag && ch->buffer) {
- if (ch->esd->slConfig->useAccessUnitEndFlag) {
- GF_LOG(GF_LOG_WARNING, GF_LOG_SYNC, ("[SyncLayer] ES%d: missed end of AU (DTS %d)\n", ch->esd->ESID, ch->DTS));
- }
- if (ch->codec_resilient) {
- if (!ch->IsClockInit && !ch->skip_time_check_for_pending) gf_es_check_timing(ch);
- Channel_DispatchAU(ch, 0);
- } else {
- gf_free(ch->buffer);
- ch->buffer = NULL;
- ch->AULength = 0;
- ch->len = ch->allocSize = 0;
- }
- }
-
/*update the RAP marker on a packet base (to cope with AVC/H264 NALU->AU reconstruction)*/
if (hdr.randomAccessPointFlag) ch->IsRap = 1;
if (ch->es_state != GF_ESM_ES_RUNNING) return NULL;
if (!ch->is_pulling) {
- if (!ch->AU_buffer_first) {
+ gf_mx_p(ch->mx);
+
+ if (!ch->AU_buffer_first || (ch->BufferTime < (s32) ch->MaxBuffer/2) ) {
/*query buffer level, don't sleep if too low*/
GF_NetworkCommand com;
com.command_type = GF_NET_SERVICE_FLUSH_DATA;
/*we must update buffering before fetching in order to stop buffering for streams with very few
updates (especially streams with one update, like most of OD streams)*/
- if (ch->BufferOn) Channel_UpdateBuffering(ch, 0);
+ if (ch->BufferOn && ch->AU_buffer_first) Channel_UpdateBuffering(ch, 0);
+ gf_mx_v(ch->mx);
+
if (ch->BufferOn) {
if (ch->first_au_fetched || !ch->AU_buffer_first || !ch->AU_buffer_first->next)
return NULL;
return NULL;
}
}
- GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ES%d - Dispatch Pull AU DTS %d - CTS %d - size %d time %d - UTC "LLU" ms\n", ch->esd->ESID, ch->DTS, ch->CTS, ch->AU_buffer_pull->dataLength, gf_clock_real_time(ch->clock), gf_net_get_utc() ));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ES%d (%s) - Dispatch Pull AU DTS %d - CTS %d - size %d time %d - UTC "LLU" ms\n", ch->esd->ESID, ch->odm->net_service->url, ch->DTS, ch->CTS, ch->AU_buffer_pull->dataLength, gf_clock_real_time(ch->clock), gf_net_get_utc() ));
}
/*this may happen in file streaming when data has not arrived yet, in which case we discard the AU*/
if (ch->pull_forced_buffer) {
assert(ch->BufferOn);
- gf_term_service_media_event(ch->odm, GF_EVENT_MEDIA_PLAYING);
ch->pull_forced_buffer=0;
gf_es_buffer_off(ch);
+ Channel_UpdateBuffering(ch, 1);
+ } else if (is_new_data && !ch->first_au_fetched) {
+ Channel_UpdateBuffering(ch, 1);
}
+
return ch->AU_buffer_pull;
}
void gf_clock_pause(GF_Clock *ck)
{
gf_mx_p(ck->mx);
- if (!ck->Paused) ck->PauseTime = gf_term_get_time(ck->term);
+ if (!ck->Paused)
+ ck->PauseTime = gf_term_get_time(ck->term);
ck->Paused += 1;
gf_mx_v(ck->mx);
}
{
gf_mx_p(ck->mx);
assert(ck->Paused);
+ if (!ck->Paused) {
+ assert(!ck->Buffering);
+ }
ck->Paused -= 1;
if (!ck->Paused)
ck->StartTime += gf_term_get_time(ck->term) - ck->PauseTime;
GF_Codec *gf_codec_new(GF_ObjectManager *odm, GF_ESD *base_layer, s32 PL, GF_Err *e)
{
GF_Codec *tmp;
+
+ //this is an addon, we must check if it's scalable stream or not ...
+ //if so, do not create any new codec
+ if (odm->parentscene && odm->parentscene->root_od->addon) {
+ switch (base_layer->decoderConfig->objectTypeIndication) {
+ case GPAC_OTI_VIDEO_SHVC:
+ case GPAC_OTI_VIDEO_SVC:
+ odm->scalable_addon = 1;
+ odm->parentscene->root_od->addon->scalable_type = 1;
+ *e = GF_OK;
+ //fixme - we need a way to signal dependencies accross services!!
+ base_layer->dependsOnESID = 0xFFFF;
+ return NULL;
+ default:
+ break;
+ }
+ }
+
GF_SAFEALLOC(tmp, GF_Codec);
if (! tmp) {
*e = GF_OUT_OF_MEM;
tmp->Status = GF_ESM_CODEC_STOP;
if (tmp->type==GF_STREAM_PRIVATE_MEDIA) tmp->type = GF_STREAM_VISUAL;
-
+
+ if (tmp->type==GF_STREAM_VISUAL) {
+ GF_CodecCapability cap;
+ cap.CapCode = GF_CODEC_DISPLAY_BPP;
+ cap.cap.valueInt = odm->term->compositor->video_out->max_screen_bpp;
+ gf_codec_set_capability(tmp, cap);
+ }
tmp->Priority = base_layer->streamPriority ? base_layer->streamPriority : 1;
GF_LOG(GF_LOG_INFO, GF_LOG_CODEC, ("[Codec] Found decoder %s for stream type %s\n", tmp->decio ? tmp->decio->module_name : "RAW", gf_esd_get_textual_description(base_layer) ));
cap.CapCode = GF_CODEC_REORDER;
if (gf_codec_get_capability(codec, &cap) == GF_OK)
codec->is_reordering = cap.cap.valueInt;
+
+ codec->trusted_cts = 0;
+ cap.CapCode = GF_CODEC_TRUSTED_CTS;
+ if (gf_codec_get_capability(codec, &cap) == GF_OK)
+ codec->trusted_cts = cap.cap.valueInt;
+
}
if (codec->flags & GF_ESM_CODEC_IS_RAW_MEDIA) {
}
}
-
/*assign the first base layer as the codec clock by default, or current channel clock if no clock set
Also assign codec priority here*/
if (!ch->esd->dependsOnESID || !codec->ck) {
}
}
+Bool gf_codec_is_scene_or_image(GF_Codec *codec)
+{
+ if (!codec) return GF_TRUE;
+ if (!codec->CB) return GF_TRUE;
+ if (codec->CB->Capacity>1 || codec->CB->no_allocation) return GF_FALSE;
+ return GF_TRUE;
+}
+
Bool gf_codec_remove_channel(GF_Codec *codec, struct _es_channel *ch)
{
s32 i;
}
-static void codec_update_stats(GF_Codec *codec, u32 dataLength, u32 dec_time, u32 DTS)
+static void codec_update_stats(GF_Codec *codec, u32 dataLength, u64 dec_time, u32 DTS)
{
codec->total_dec_time += dec_time;
codec->last_frame_time = gf_sys_clock();
{
GF_Channel *ch;
GF_DBUnit *AU;
+ GF_List *src_channels = codec->inChannels;
+ GF_ObjectManager *current_odm = codec->odm;
u32 count, curCTS, i;
- count = gf_list_count(codec->inChannels);
+
*nextAU = NULL;
*activeChannel = NULL;
+ curCTS = 0;
+
+browse_scalable:
+ count = gf_list_count(src_channels);
if (!count) return;
- curCTS = 0;
-
/*browse from base to top layer*/
for (i=0;i<count;i++) {
- ch = (GF_Channel*)gf_list_get(codec->inChannels, i);
+ ch = (GF_Channel*)gf_list_get(src_channels, i);
if ((codec->type==GF_STREAM_OCR) && ch->IsClockInit) {
/*check duration - we assume that scalable OCR streams are just pure nonsense...*/
//gf_es_drop_au(ch);
continue;
}
- GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] ODM%d#CH%d AU CTS %d selected as first layer (DTS %d)\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, AU->CTS, AU->DTS));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] ODM%d#CH%d (%s) AU DTS %d (size %d) selected as first layer (CTS %d)\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, ch->odm->net_service->url, AU->DTS, AU->dataLength, AU->CTS));
*nextAU = AU;
*activeChannel = ch;
curCTS = AU->CTS;
baseAU->data = gf_realloc(baseAU->data, baseAU->dataLength + AU->dataLength);
memcpy(baseAU->data + baseAU->dataLength , AU->data, AU->dataLength);
baseAU->dataLength += AU->dataLength;
- GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] ODM%d#CH%d AU CTS %d reaggregated on base layer %d\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, AU->CTS, (*activeChannel)->esd->ESID));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] ODM%d#CH%d (%s) AU DTS %d reaggregated on base layer %d\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, ch->odm->net_service->url, AU->DTS, (*activeChannel)->esd->ESID));
gf_es_drop_au(ch);
ch->first_au_fetched = 1;
}
// AU found with the same CTS as the current base, we either had a drop on the base or some temporal scalability - aggregate from current channel.
else {
//we cannot tell whether this is a loss or temporal scalable, don't attempt to discard the AU
- GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] ODM%d#CH%d AU CTS %d doesn't have the same CTS as the base (%d)- selected as first layer\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, AU->CTS, (*nextAU)->CTS));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] ODM%d#CH%d (%s) AU CTS %d doesn't have the same CTS as the base (%d)- selected as first layer\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, ch->odm->net_service->url, AU->CTS, (*nextAU)->CTS));
*nextAU = AU;
*activeChannel = ch;
- (*activeChannel)->prev_aggregated_dts = (*nextAU)->DTS;
curCTS = AU->CTS;
}
}
//we can rely on DTS - if DTS is earlier on the enhencement, this is a loss or temporal scalability
else if (AU->DTS < (*nextAU)->DTS) {
//Sample with the same DTS of this AU has been decoded. This is a loss, we need to drop it and re-fetch this channel
- if (AU->DTS < (*activeChannel)->prev_aggregated_dts) {
- GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] ODM%d#CH%d AU CTS %d: loss detected - re-fetch channel\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, AU->CTS));
+ if (AU->DTS <= codec->last_unit_dts)
+ {
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] ODM%d#CH%d %s AU DTS %d but base DTS %d: loss detected - re-fetch channel\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, ch->odm->net_service->url, AU->DTS, (*nextAU)->DTS));
gf_es_drop_au(ch);
goto refetch_AU;
}
//This is a temporal scalability so we re-aggregate from the enhencement
else {
- GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] ODM%d#CH%d AU CTS %d selected as first layer\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, AU->CTS));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] ODM%d#CH%d (%s) AU DTS %d selected as first layer (CTS %d)\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, ch->odm->net_service->url, AU->DTS, AU->CTS));
*nextAU = AU;
*activeChannel = ch;
- (*activeChannel)->prev_aggregated_dts = (*nextAU)->DTS;
curCTS = AU->CTS;
}
}
}
}
-
- if (*nextAU)
- (*activeChannel)->prev_aggregated_dts = (*nextAU)->DTS;
+ //scalable addon, browse channels in scalable object
+ if (current_odm->scalable_odm) {
+ current_odm = current_odm->scalable_odm;
+ src_channels = current_odm->channels;
+ goto browse_scalable;
+ }
if (codec->is_reordering && *nextAU && codec->first_frame_dispatched) {
if ((*activeChannel)->esd->slConfig->no_dts_signaling) {
(*nextAU)->CTS = (*nextAU)->DTS;
}
}
+
}
/*scalable browsing of input channels: find the AU with the lowest DTS on all input channels*/
{
GF_DBUnit *AU;
GF_Channel *ch;
- u32 now, obj_time, mm_level, au_time, cts;
+ u32 obj_time, mm_level, au_time, cts;
+ u64 now;
GF_Scene *scene_locked;
Bool check_next_unit;
GF_SceneDecoder *sdec = (GF_SceneDecoder *)codec->decio;
updates in time*/
codec->odm->current_time = gf_clock_time(codec->ck);
- now = gf_term_get_time(codec->odm->term);
+ now = gf_sys_clock_high_res();
if (codec->odm->term->bench_mode==2) {
e = GF_OK;
} else {
e = sdec->ProcessData(sdec, AU->data, AU->dataLength, ch->esd->ESID, au_time, mm_level);
}
- now = gf_term_get_time(codec->odm->term) - now;
+ now = gf_sys_clock_high_res() - now;
- GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] ODM%d#CH%d at %d decoded AU TS %d in %d ms\n", sdec->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, codec->odm->current_time, AU->CTS, now));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] ODM%d#CH%d at %d decoded AU TS %d in "LLU" us\n", sdec->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, codec->odm->current_time, AU->CTS, now));
codec_update_stats(codec, AU->dataLength, now, AU->DTS);
codec->prev_au_size = AU->dataLength;
/*special handling of decoders not using ESM*/
static GF_Err PrivateScene_Process(GF_Codec *codec, u32 TimeAvailable)
{
- u32 now;
+ u64 now;
GF_Channel *ch;
GF_Scene *scene_locked;
GF_SceneDecoder *sdec = (GF_SceneDecoder *)codec->decio;
if (!gf_mx_try_lock(scene_locked->root_od->term->compositor->mx)) return GF_OK;
- now = gf_term_get_time(codec->odm->term);
+ now = gf_sys_clock_high_res();
if (codec->odm->term->bench_mode == 2) {
e = GF_OK;
} else {
e = sdec->ProcessData(sdec, NULL, 0, ch->esd->ESID, codec->odm->current_time, GF_CODEC_LEVEL_NORMAL);
}
- now = gf_term_get_time(codec->odm->term) - now;
+ now = gf_sys_clock_high_res() - now;
codec->last_unit_dts ++;
/*resume on error*/
if (e && (codec->last_unit_dts<2) ) {
static GFINLINE GF_Err UnlockCompositionUnit(GF_Codec *dec, GF_CMUnit *CU, u32 cu_size)
{
-
- /*temporal scalability disabling: if we already rendered this, no point getting further*/
-/*
- if (CU->TS < dec->CB->LastRenderedTS) {
- GF_LOG(GF_LOG_INFO, GF_LOG_CODEC, ("[ODM] CU (TS %d) later than last frame drawn (TS %d) - droping\n", CU->TS, dec->CB->LastRenderedTS));
- cu_size = 0;
- }
-
-*/
-
- if (dec->is_reordering) {
+ if (dec->is_reordering && !dec->trusted_cts) {
/*first dispatch from decoder, store CTS*/
if (!dec->first_frame_dispatched) {
dec->recomputed_cts = CU->TS;
GF_DBUnit *AU;
GF_Channel *ch, *prev_ch;
Bool drop_late_frames = 0;
+ u64 now, entryTime;
u32 mmlevel, cts;
- u32 first, entryTime, now, obj_time, unit_size;
+ u32 first, obj_time, unit_size;
GF_MediaDecoder *mdec = (GF_MediaDecoder*)codec->decio;
GF_Err e = GF_OK;
CU = NULL;
if audio codec muted we dispatch to keep sync in place*/
if (codec->Muted && (codec->type==GF_STREAM_VISUAL) ) return GF_OK;
- entryTime = gf_term_get_time(codec->odm->term);
+ //cannot output frame, do nothing (we force a channel query before for pull mode)
+ if (codec->CB->Capacity == codec->CB->UnitCount) {
+ if (codec->CB->UnitCount > 1) return GF_OK;
+ else if (codec->direct_vout) return GF_OK;
+ }
+
+ entryTime = gf_sys_clock_high_res();
if (!codec->odm->term->bench_mode && (codec->odm->term->flags & GF_TERM_DROP_LATE_FRAMES))
drop_late_frames = 1;
- //cannot output frame, do nothing (we force a channel query before for pull mode)
- if ( (codec->CB->UnitCount > 1) && (codec->CB->Capacity == codec->CB->UnitCount) )
- return GF_OK;
/*fetch next AU in DTS order for this codec*/
MediaDecoder_GetNextAU(codec, &ch, &AU);
assert( CU );
unit_size = 0;
if (codec->odm->term->bench_mode != 2) {
- e = mdec->ProcessData(mdec, NULL, 0, 0, CU->data, &unit_size, 0, 0);
+ e = mdec->ProcessData(mdec, NULL, 0, 0, &CU->TS, CU->data, &unit_size, 0, 0);
if (e==GF_OK) {
e = UnlockCompositionUnit(codec, CU, unit_size);
if (unit_size) return GF_OK;
}
}
/*if no data, and channel not buffering, ABORT CB buffer (data timeout or EOS not detectable)*/
- else if (ch && !ch->BufferOn && !ch->last_au_was_seek)
+ else if (ch && !ch->is_pulling && !ch->BufferOn && !ch->last_au_was_seek)
gf_cm_abort_buffering(codec->CB);
//GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] ODM%d: No data in decoding buffer\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID));
/*get the object time*/
obj_time = gf_clock_time(codec->ck);
+
/*Media Time for media codecs is updated in the CB*/
if (!codec->CB) {
scalable_retry:
- now = gf_term_get_time(codec->odm->term);
+ now = gf_sys_clock_high_res();
assert( CU );
if (!CU->data && unit_size && !codec->CB->no_allocation) {
gf_cm_abort_buffering(codec->CB);
} else {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] ODM%d ES%d at %d decoding frame DTS %d CTS %d size %d (%d in channels)\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, gf_clock_real_time(ch->clock), AU->DTS, AU->CTS, AU->dataLength, ch->AU_Count));
- e = mdec->ProcessData(mdec, AU->data, AU->dataLength, ch->esd->ESID, CU->data, &unit_size, AU->PaddingBits, mmlevel);
+ e = mdec->ProcessData(mdec, AU->data, AU->dataLength, ch->esd->ESID, &CU->TS, CU->data, &unit_size, AU->PaddingBits, mmlevel);
}
- now = gf_term_get_time(codec->odm->term) - now;
+ now = gf_sys_clock_high_res() - now;
if (codec->Status == GF_ESM_CODEC_STOP) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] Exit decode loop because codec has been stopped\n", codec->decio->module_name));
return GF_OK;
}
e = UnlockCompositionUnit(codec, CU, unit_size);
- GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] ODM%d ES%d at %d decoded packed frame TS %d in %d ms\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, gf_clock_real_time(ch->clock), AU->CTS, now));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] ODM%d ES%d at %d decoded packed frame TS %d in "LLU" us\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, gf_clock_real_time(ch->clock), AU->CTS, now));
if (ch->skip_sl) {
if (codec->bytes_per_sec) {
codec->cur_audio_bytes += unit_size;
processing a scalable stream*/
case GF_OK:
if (unit_size) {
- GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] ODM%d ES%d at %d decoded frame TS %d in %d ms (DTS %d - size %d) - %d in CB\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, gf_clock_real_time(ch->clock), AU->CTS, now, AU->DTS, AU->dataLength, codec->CB->UnitCount + 1));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] ODM%d ES%d at %d decoded frame TS %d in "LLU" us (DTS %d - size %d) - %d in CB\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, gf_clock_real_time(ch->clock), AU->CTS, now, AU->DTS, AU->dataLength, codec->CB->UnitCount + 1));
if (codec->direct_vout) {
e = mdec->GetOutputBuffer(mdec, ch->esd->ESID, &codec->CB->pY, &codec->CB->pU, &codec->CB->pV);
+ if (e==GF_OK) {
+ gf_sc_set_video_pending_frame(codec->odm->term->compositor);
+ }
}
}
- /*if no size the decoder is not using the composition memory - if the object is in intitial buffering resume it!!*/
- else if (codec->CB->Status == CB_BUFFER) {
+#if 0
+ /*if no size and the decoder is not using the composition memory - if the object is in initial buffering resume it!!*/
+ else if ( (!codec->CB->UnitSize && !codec->CB->Capacity) && (codec->CB->Status == CB_BUFFER)) {
codec->nb_dispatch_skipped++;
if (codec->nb_dispatch_skipped==codec->CB->UnitCount)
gf_cm_abort_buffering(codec->CB);
}
+#endif
codec_update_stats(codec, AU->dataLength, now, AU->DTS);
if (ch->skip_sl) {
unit_size = 0;
/*error - if the object is in initial buffering resume it!!*/
gf_cm_abort_buffering(codec->CB);
- GF_LOG(GF_LOG_INFO, GF_LOG_CODEC, ("[%s] ODM%d ES%d At %d (frame TS %d - %d ms ): decoded error %s\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, gf_clock_real_time(ch->clock), AU->CTS, now, gf_error_to_string(e) ));
+ GF_LOG(GF_LOG_INFO, GF_LOG_CODEC, ("[%s] ODM%d ES%d At %d (frame TS %d - "LLU" us ): decoded error %s\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, gf_clock_real_time(ch->clock), AU->CTS, now, gf_error_to_string(e) ));
e = GF_OK;
break;
}
#ifndef GPAC_DISABLE_LOG
if (unit_size) {
if (ch->is_pulling) {
- GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("[%s] at %d decoded frame CTS %d in %d ms\n", codec->decio->module_name, gf_clock_real_time(ch->clock), AU->CTS, now));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("[%s] at %d decoded frame CTS %d in "LLU" us\n", codec->decio->module_name, gf_clock_real_time(ch->clock), AU->CTS, now));
} else {
- GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("[%s] at %d decoded frame CTS %d in %d ms - %d AU in channel\n", codec->decio->module_name, gf_clock_real_time(ch->clock), AU->CTS, now, ch->AU_Count));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("[%s] at %d decoded frame CTS %d in "LLU" us - %d AU in channel\n", codec->decio->module_name, gf_clock_real_time(ch->clock), AU->CTS, now, ch->AU_Count));
}
}
#endif
GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] Exit decode loop because no more input data\n", codec->decio->module_name));
return GF_OK;
}
- now = gf_term_get_time(codec->odm->term) - entryTime;
+ now = gf_sys_clock_high_res() - entryTime;
/*escape from decoding loop only if above critical limit - this is to avoid starvation on audio*/
if (!ch->esd->dependsOnESID && (codec->CB->UnitCount > codec->CB->Min)) {
if (now >= TimeAvailable) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] Exit decode loop because time is up: %d vs %d available\n", codec->decio->module_name, now, TimeAvailable));
return GF_OK;
}
- } else if (now >= 10*TimeAvailable) {
- GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] Exit decode loop because running for too long: %d vs %d available\n", codec->decio->module_name, now, TimeAvailable));
+ } else if (now >= 10000*TimeAvailable) {
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] Exit decode loop because running for too long: %d vs %d available\n", codec->decio->module_name, now/1000, TimeAvailable));
+ return GF_OK;
+ } else if (codec->odm->term->bench_mode) {
return GF_OK;
}
- MediaDecoder_GetNextAU(codec, &ch, &AU);
- if (!ch || !AU) return GF_OK;
}
return GF_OK;
}
codec->prev_au_size = 0;
codec->Status = Status;
codec->last_stat_start = codec->cur_bit_size = codec->max_bit_rate = codec->avg_bit_rate = 0;
- codec->nb_dec_frames = codec->total_dec_time = codec->max_dec_time = 0;
+ codec->nb_dec_frames = 0;
+ codec->total_dec_time = codec->max_dec_time = 0;
codec->cur_audio_bytes = codec->cur_video_frames = 0;
codec->nb_droped = 0;
codec->nb_repeted_frames = 0;
stack->media_start = stack->control->mediaStartTime;
stack->media_stop = stack->control->mediaStopTime;
stack->is_init = 1;
+ stack->paused = 0;
/*the object has already been started, and media start time is not 0, restart*/
if (stack->stream->num_open) {
if ( (stack->media_start > 0) || (gf_list_count(stack->seg)>0 ) ) {
if (!stack) return;
if (stack->changed!=2) {
/*check URL*/
- if (MC_URLChanged(&stack->url, &stack->control->url)) stack->changed = 2;
+ if (MC_URLChanged(&stack->url, &stack->control->url))
+ stack->changed = 2;
/*check speed (play/pause)*/
else if (stack->media_speed != stack->control->mediaSpeed)
stack->changed = 1;
return 0;
}
-static u32 MM_SimulationStep_Decoder(GF_Terminal *term)
+static u32 MM_SimulationStep_Decoder(GF_Terminal *term, u32 *nb_active_decs)
{
CodecEntry *ce;
GF_Err e;
count = gf_list_count(term->codecs);
time_left = term->frame_duration;
+ *nb_active_decs = 0;
if (term->last_codec >= count) term->last_codec = 0;
remain = count;
- time_taken = 0;
/*this is ultra basic a nice scheduling system would be much better*/
while (remain) {
ce = (CodecEntry*)gf_list_get(term->codecs, term->last_codec);
time_slice = ce->dec->Priority * time_left / term->cumulated_priority;
if (ce->dec->PriorityBoost) time_slice *= 2;
time_taken = gf_sys_clock();
-
+ (*nb_active_decs) ++;
e = gf_codec_process(ce->dec, time_slice);
time_taken = gf_sys_clock() - time_taken;
/*avoid signaling errors too often...*/
// GF_LOG(GF_LOG_DEBUG, GF_LOG_RTI, ("(RTI] Terminal Cycle Log\tServices\tDecoders\tCompositor\tSleep\n"));
while (term->flags & GF_TERM_RUNNING) {
+ u32 nb_decs = 0;
u32 left = 0;
- if (do_codec) left = MM_SimulationStep_Decoder(term);
+ if (do_codec) left = MM_SimulationStep_Decoder(term, &nb_decs);
else left = term->frame_duration;
if (do_scene) {
+ u32 ms_until_next=0;
u32 time_taken = gf_sys_clock();
- gf_sc_draw_frame(term->compositor);
+ gf_sc_draw_frame(term->compositor, &ms_until_next);
time_taken = gf_sys_clock() - time_taken;
- if (left>time_taken)
+ if (ms_until_next<term->frame_duration/2) {
+ left = 0;
+ } else if (left>time_taken)
left -= time_taken;
else
left = 0;
}
-
if (do_regulate) {
if (term->bench_mode) {
gf_sleep(0);
} else {
if (left==term->frame_duration) {
- gf_sleep(term->frame_duration/2);
+ //if nothing was done during this pass but we have active decoders, just yield. We don't want to sleep since
+ //composition memory could be released at any time. We should have a signal here, rather than a wait
+ gf_sleep(nb_decs ? 0 : term->frame_duration/2);
}
}
}
u32 RunSingleDec(void *ptr)
{
GF_Err e;
- u32 time_left;
+ u64 time_taken;
CodecEntry *ce = (CodecEntry *) ptr;
GF_LOG(GF_LOG_DEBUG, GF_LOG_CORE, ("[MediaDecoder %d] Entering thread ID %d\n", ce->dec->odm->OD->objectDescriptorID, gf_th_id() ));
while (ce->flags & GF_MM_CE_RUNNING) {
- time_left = gf_sys_clock();
+ time_taken = gf_sys_clock_high_res();
if (!ce->dec->force_cb_resize) {
gf_mx_p(ce->mx);
e = gf_codec_process(ce->dec, ce->dec->odm->term->frame_duration);
if (e) gf_term_message(ce->dec->odm->term, ce->dec->odm->net_service->url, "Decoding Error", e);
gf_mx_v(ce->mx);
}
- time_left = gf_sys_clock() - time_left;
+ time_taken = gf_sys_clock_high_res() - time_taken;
/*no priority boost this way for systems codecs, priority is dynamically set by not releasing the
/*while on don't sleep*/
if (ce->dec->PriorityBoost) continue;
- if (time_left) {
+ if (time_taken<20) {
gf_sleep(1);
- } else {
- gf_sleep(ce->dec->odm->term->frame_duration/2);
}
}
ce->flags |= GF_MM_CE_DEAD;
GF_CodecCapability cap;
CodecEntry *ce;
GF_Terminal *term = codec->odm->term;
+ if (!gf_list_count(codec->odm->channels)) return;
ce = mm_get_codec(term->codecs, codec);
if (!ce) return;
}
}
+
/*unlock dec*/
if (ce->mx)
gf_mx_v(ce->mx);
GF_EXPORT
u32 gf_term_process_step(GF_Terminal *term)
{
+ u32 nb_decs=0;
u32 time_taken = gf_sys_clock();
if (term->flags & GF_TERM_NO_DECODER_THREAD) {
- MM_SimulationStep_Decoder(term);
+ MM_SimulationStep_Decoder(term, &nb_decs);
}
if (term->flags & GF_TERM_NO_COMPOSITOR_THREAD) {
- gf_sc_draw_frame(term->compositor);
+ u32 ms_until_next;
+ gf_sc_draw_frame(term->compositor, &ms_until_next);
+ if (ms_until_next<term->compositor->frame_duration/2) {
+ time_taken=0;
+ }
+
}
time_taken = gf_sys_clock() - time_taken;
if (time_taken > term->compositor->frame_duration) {
if (term->bench_mode || (term->user->init_flags & GF_TERM_NO_REGULATION)) return time_taken;
if (2*time_taken >= term->compositor->frame_duration) {
- gf_sleep(time_taken);
+ gf_sleep(nb_decs ? 1 : time_taken);
}
return time_taken;
}
gf_mx_v(term->mm_mx);
}
- if (!gf_sc_draw_frame(term->compositor))
+ if (!gf_sc_draw_frame(term->compositor, NULL))
break;
if (! (term->user->init_flags & GF_TERM_NO_REGULATION))
void gf_cm_del(GF_CompositionMemory *cb)
{
gf_odm_lock(cb->odm, 1);
- /*may happen when CB is destroyed right after creation in case*/
+ /*may happen when CB is destroyed right after creation */
if (cb->Status == CB_BUFFER) {
gf_clock_buffer_off(cb->odm->codec->ck);
- GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ODM%d: buffering off at %d (nb buffering on clock: %d)\n", cb->odm->OD->objectDescriptorID, gf_term_get_time(cb->odm->term), cb->odm->codec->ck->Buffering));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] CB destroy - ODM%d: buffering off at %d (nb buffering on clock: %d)\n", cb->odm->OD->objectDescriptorID, gf_term_get_time(cb->odm->term), cb->odm->codec->ck->Buffering));
}
if (cb->input){
/*break the loop and destroy*/
#endif
}
+static void cb_set_buffer_off(GF_CompositionMemory *cb)
+{
+ gf_clock_buffer_off(cb->odm->codec->ck);
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] CB Buffering done ODM%d: buffering off at %d (nb buffering on clock: %d)\n", cb->odm->OD->objectDescriptorID, gf_term_get_time(cb->odm->term), cb->odm->codec->ck->Buffering));
+
+ gf_term_service_media_event(cb->odm->parentscene->root_od, GF_EVENT_MEDIA_CANPLAY);
+}
+
void gf_cm_unlock_input(GF_CompositionMemory *cb, GF_CMUnit *cu, u32 cu_size, Bool codec_reordering)
{
/*nothing dispatched, ignore*/
cu->dataLength = cu_size;
cu->RenderedLength = 0;
- /*turn off buffering - this must be done now rather than when fetching first output frame since we're not
+ /*turn off buffering for audio - this must be done now rather than when fetching first output frame since we're not
sure output is fetched (Switch node, ...)*/
- if ( (cb->Status == CB_BUFFER) && (cb->UnitCount >= cb->Capacity) ) {
+ if ( (cb->Status == CB_BUFFER) && (cb->UnitCount >= cb->Capacity) && (cb->odm->codec->type == GF_STREAM_AUDIO)) {
/*done with buffering, signal to the clock (ONLY ONCE !)*/
cb->Status = CB_BUFFER_DONE;
- gf_clock_buffer_off(cb->odm->codec->ck);
-// cb->odm->codec->ck->data_timeout = 0;
- GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ODM%d: buffering off at %d (nb buffering on clock: %d)\n", cb->odm->OD->objectDescriptorID, gf_term_get_time(cb->odm->term), cb->odm->codec->ck->Buffering));
-
- gf_term_service_media_event(cb->odm->parentscene->root_od, GF_EVENT_MEDIA_CANPLAY);
+ cb_set_buffer_off(cb);
}
//new FPS regulation doesn't need this signaling
/*force update of media time*/
mediasensor_update_timing(cb->odm, 1);
#endif
+ gf_odm_signal_eos(cb->odm);
}
return NULL;
}
}
-
/*drop the output CU*/
-void gf_cm_drop_output(GF_CompositionMemory *cb)
+void gf_cm_output_kept(GF_CompositionMemory *cb)
{
assert(cb->UnitCount);
/*this allows reuse of the CU*/
cb->output->RenderedLength = 0;
cb->LastRenderedTS = cb->output->TS;
+ if (cb->Status==CB_BUFFER) {
+ cb_set_buffer_off(cb);
+ cb->Status=CB_PLAY;
+ }
+}
+
+/*drop the output CU*/
+void gf_cm_drop_output(GF_CompositionMemory *cb)
+{
+ gf_cm_output_kept(cb);
+ if (cb->Status!=CB_PLAY) {
+ return;
+ }
+
/*WARNING: in RAW mode, we (for the moment) only have one unit - setting output->dataLength to 0 means the input is available
for the raw channel - we have to make sure the output is completely reset before releasing the sema*/
/*on visual streams (except raw ones), always keep the last AU*/
- if (!cb->no_allocation && cb->output->dataLength && (cb->odm->codec->type == GF_STREAM_VISUAL) ) {
+ if (cb->output->dataLength && (cb->odm->codec->type == GF_STREAM_VISUAL) ) {
if ( !cb->output->next->dataLength || (cb->Capacity == 1) ) {
- if (cb->odm->raw_frame_sema) {
- cb->output->dataLength = 0;
- gf_sema_notify(cb->odm->raw_frame_sema, 1);
+ Bool no_drop = 1;
+ if (cb->no_allocation ) {
+ if (cb->odm->term->bench_mode)
+ no_drop = 0;
+ else if (gf_clock_time(cb->odm->codec->ck) > cb->output->TS)
+ no_drop = 0;
+ }
+ if (no_drop) {
+ if (cb->odm->raw_frame_sema) {
+ cb->output->dataLength = 0;
+ gf_sema_notify(cb->odm->raw_frame_sema, 1);
+ }
+ return;
}
- return;
}
}
void gf_cm_set_status(GF_CompositionMemory *cb, u32 Status)
{
+ if (cb->Status == Status)
+ return;
+
gf_odm_lock(cb->odm, 1);
/*if we're asked for play, trigger on buffering*/
if (Status == CB_PLAY) {
cb->LastRenderedTS = 0;
if (cb->Status == CB_BUFFER) {
gf_clock_buffer_off(cb->odm->codec->ck);
- GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ODM%d: buffering off at %d (nb buffering on clock: %d)\n", cb->odm->OD->objectDescriptorID, gf_term_get_time(cb->odm->term), cb->odm->codec->ck->Buffering));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] CB status changed - ODM%d: buffering off at %d (nb buffering on clock: %d)\n", cb->odm->OD->objectDescriptorID, gf_term_get_time(cb->odm->term), cb->odm->codec->ck->Buffering));
}
if (Status == CB_STOP) {
gf_cm_reset(cb);
cb->LastRenderedTS = 0;
}
cb->Status = Status;
+ if (Status==CB_BUFFER) {
+ gf_clock_buffer_on(cb->odm->codec->ck);
+ }
}
gf_odm_lock(cb->odm, 0);
if (cb->Status == CB_BUFFER) {
cb->Status = CB_BUFFER_DONE;
gf_clock_buffer_off(cb->odm->codec->ck);
-// cb->odm->codec->ck->data_timeout = 0;
- GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ODM%d: buffering off at %d (nb buffering on clock: %d)\n", cb->odm->OD->objectDescriptorID, gf_term_get_time(cb->odm->term), cb->odm->codec->ck->Buffering));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] CB EOS - ODM%d: buffering off at %d (nb buffering on clock: %d)\n", cb->odm->OD->objectDescriptorID, gf_term_get_time(cb->odm->term), cb->odm->codec->ck->Buffering));
}
cb->HasSeenEOS = 1;
return !cb->odm->codec->ck->Paused;
if ((cb->Status == CB_BUFFER_DONE) && (gf_clock_is_started(cb->odm->codec->ck) || cb->odm->term->play_state) ) {
- cb->Status = CB_PLAY;
return 1;
}
{
if (cb->Status == CB_BUFFER) {
cb->Status = CB_BUFFER_DONE;
- gf_clock_buffer_off(cb->odm->codec->ck);
-// cb->odm->codec->ck->data_timeout = 0;
- GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ODM%d: buffering off at %d (nb buffering on clock: %d)\n", cb->odm->OD->objectDescriptorID, gf_term_get_time(cb->odm->term), cb->odm->codec->ck->Buffering));
+ cb_set_buffer_off(cb);
}
}
/*fetch output buffer, NULL if output is empty*/
GF_CMUnit *gf_cm_get_output(GF_CompositionMemory *cb);
-/*release the output buffer once rendered - if renderedLength is not equal to dataLength the
-output is NOT droped*/
+/*release the output buffer once rendered */
void gf_cm_drop_output(GF_CompositionMemory *cb);
+/*notifies the output has not been discarded: sets render length to 0 and check clock resume if needed*/
+void gf_cm_output_kept(GF_CompositionMemory *cb);
/*reset the entire memory*/
void gf_cm_reset(GF_CompositionMemory *cb);
}
}
+
GF_EXPORT
-char *gf_mo_fetch_data(GF_MediaObject *mo, Bool resync, Bool *eos, u32 *timestamp, u32 *size, s32 *ms_until_pres, u32 *ms_until_next)
+char *gf_mo_fetch_data(GF_MediaObject *mo, Bool resync, Bool *eos, u32 *timestamp, u32 *size, s32 *ms_until_pres, s32 *ms_until_next)
{
GF_Codec *codec;
Bool force_decode = GF_FALSE;
s32 diff;
Bool bench_mode;
-
- *eos = GF_FALSE;
*eos = GF_FALSE;
*timestamp = mo->timestamp;
*size = mo->framesize;
}
}
- /*fast forward, force decode if no data is available*/
- if (! *eos && (codec->ck->speed > FIX_ONE))
+ /*fast forward, bench mode with composition memory: force decode if no data is available*/
+ if (! *eos && ((codec->ck->speed > FIX_ONE) || (codec->odm->term->bench_mode && !codec->CB->no_allocation) || (codec->type==GF_STREAM_AUDIO) ) )
force_decode = GF_TRUE;
+
if (force_decode) {
u32 retry=100;
break;
}
retry--;
+ gf_sleep(0);
}
- if (!retry) {
+ if (!retry && codec->force_cb_resize) {
GF_LOG(GF_LOG_WARNING, GF_LOG_MEDIA, ("[ODM%d] At %d could not resize and decode next frame in one pass - blank frame after TS %d\n", mo->odm->OD->objectDescriptorID, gf_clock_time(codec->ck), mo->timestamp));
}
if (!gf_odm_lock_mo(mo))
/*note this assert is NOT true when recomputing DTS from CTS on the fly (MPEG1/2 RTP and H264/AVC RTP)*/
//assert(CU->TS >= codec->CB->LastRenderedTS);
- if (codec->CB->UnitCount==1) resync = GF_FALSE;
+ if (codec->CB->UnitCount<=1) resync = GF_FALSE;
- if (bench_mode) {
+ if (bench_mode && resync) {
resync = GF_FALSE;
if (mo->timestamp == CU->TS) {
if (CU->next->dataLength) {
}
}
+
/*resync*/
obj_time = gf_clock_time(codec->ck);
+
+ //no drop mode: all frames are presented, we discard the current output only if already presented and next frame time is mature
+ if (!(mo->odm->term->flags & GF_TERM_DROP_LATE_FRAMES) && (mo->type==GF_MEDIA_OBJECT_VIDEO)) {
+ resync=GF_FALSE;
+ if (gf_clock_is_started(mo->odm->codec->ck) && (mo->timestamp==CU->TS) && CU->next->dataLength && (CU->next->TS <= obj_time) ) {
+ gf_cm_drop_output(codec->CB);
+ CU = gf_cm_get_output(codec->CB);
+ }
+ }
+
if (resync) {
u32 nb_droped = 0;
while (CU->TS < obj_time) {
+ u32 diff;
if (!CU->next->dataLength) {
if (force_decode) {
obj_time = gf_clock_time(codec->ck);
break;
}
}
+ diff = CU->next->TS;
+ diff -= CU->TS;
+ if (CU->TS + codec->CB->Capacity*diff > obj_time) {
+ break;
+ }
/*figure out closest time*/
if (CU->next->TS > obj_time) {
*eos = GF_FALSE;
/*signal EOS after rendering last frame, not while rendering it*/
*eos = GF_FALSE;
- GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("[ODM%d] At OTB %u fetch frame TS %u size %d (previous TS %d) - %d unit in CB - UTC "LLU" ms - %d ms until CTS is due - %d ms until next frame\n", mo->odm->OD->objectDescriptorID, gf_clock_time(codec->ck), CU->TS, mo->framesize, mo->timestamp, codec->CB->UnitCount, gf_net_get_utc(), mo->ms_until_pres, mo->ms_until_next ));
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("[ODM%d (%s)] At OTB %u fetch frame TS %u size %d (previous TS %d) - %d unit in CB - UTC "LLU" ms - %d ms until CTS is due - %d ms until next frame\n", mo->odm->OD->objectDescriptorID, mo->odm->net_service->url, gf_clock_time(codec->ck), CU->TS, mo->framesize, mo->timestamp, codec->CB->UnitCount, gf_net_get_utc(), mo->ms_until_pres, mo->ms_until_next ));
}
/*also adjust CU time based on consumed bytes in input, since some codecs output very large audio chunks*/
gf_term_service_media_event(mo->odm, GF_EVENT_MEDIA_TIME_UPDATE);
gf_odm_lock(mo->odm, 0);
- if (codec->direct_vout) return codec->CB->pY;
+ if (codec->direct_vout) return (char *) codec->CB->pY;
return mo->frame;
}
}
GF_EXPORT
-void gf_mo_release_data(GF_MediaObject *mo, u32 nb_bytes, s32 forceDrop)
+void gf_mo_release_data(GF_MediaObject *mo, u32 nb_bytes, s32 drop_mode)
{
#if 0
u32 obj_time;
gf_odm_lock(mo->odm, 0);
return;
}
+
+/* if ((drop_mode==0) && !(mo->odm->term->flags & GF_TERM_DROP_LATE_FRAMES) && (mo->type==GF_MEDIA_OBJECT_VIDEO))
+ drop_mode=1;
+ else
+*/
if (mo->odm->codec->CB->no_allocation)
- forceDrop = 1;
+ drop_mode = 1;
+
/*perform a sanity check on TS since the CB may have changed status - this may happen in
temporal scalability only*/
mo->odm->codec->CB->output->RenderedLength += nb_bytes;
}
- if (forceDrop<0) {
+ if (drop_mode<0) {
/*only allow for explicit last frame keeping if only one node is using the resource
otherwise this would block the composition memory*/
- if (mo->num_open>1) forceDrop=0;
+ if (mo->num_open>1) drop_mode=0;
else {
gf_odm_lock(mo->odm, 0);
return;
/*discard frame*/
if (mo->odm->codec->CB->output->RenderedLength == mo->odm->codec->CB->output->dataLength) {
- if (forceDrop) {
+ if (drop_mode) {
gf_cm_drop_output(mo->odm->codec->CB);
- forceDrop--;
-// if (forceDrop) mo->odm->codec->nb_droped++;
+ GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("[ODM%d] At OTB %u drop frame TS %u\n", mo->odm->OD->objectDescriptorID, gf_clock_time(mo->odm->codec->ck), mo->timestamp));
} else {
/*we cannot drop since we don't know the speed of the playback (which can even be frame by frame)*/
-#if 0
- obj_time = gf_clock_time(mo->odm->codec->ck);
- if (mo->odm->codec->CB->output->next->dataLength) {
- if (2*obj_time < mo->timestamp + mo->odm->codec->CB->output->next->TS ) {
- mo->odm->codec->CB->output->RenderedLength = 0;
- } else {
- gf_cm_drop_output(mo->odm->codec->CB);
- }
- } else {
- gf_cm_drop_output(mo->odm->codec->CB);
- }
-#else
- mo->odm->codec->CB->output->RenderedLength = 0;
-#endif
+
+ //notif CB we kept the output
+ gf_cm_output_kept(mo->odm->codec->CB);
}
}
}
GF_EXPORT
GF_DOMEventTarget *gf_mo_event_target_add_node(GF_MediaObject *mo, GF_Node *n)
{
+#ifndef GPAC_DISABLE_SVG
GF_DOMEventTarget *target = NULL;
if (!mo ||!n) return NULL;
- target = gf_html_media_get_event_target_from_node(n);
+ target = gf_dom_event_get_target_from_node(n);
gf_list_add(mo->evt_targets, target);
return target;
+#else
+ return NULL;
+#endif
}
GF_Err gf_mo_event_target_remove(GF_MediaObject *mo, GF_DOMEventTarget *target)
GF_DOMEventTarget *target = (GF_DOMEventTarget *)gf_list_get(mo->evt_targets, i);
if (target->ptr == node) {
gf_list_del_item(mo->evt_targets, target);
- return GF_OK;
+ i--;
+ count--;
+ //return GF_OK;
}
}
return GF_BAD_PARAM;
GF_EXPORT
GF_Node *gf_event_target_get_node(GF_DOMEventTarget *target)
{
- if (target && (target->ptr_type == GF_DOM_EVENT_TARGET_HTML_MEDIA)) {
+ if (target && (target->ptr_type == GF_DOM_EVENT_TARGET_NODE)) {
return (GF_Node *)target->ptr;
}
return NULL;
if (mo->num_open) {
if (!changed) return;
- gf_scene_notify_event(scene, GF_EVENT_UNLOAD, node, NULL, GF_OK);
+ gf_scene_notify_event(scene, GF_EVENT_UNLOAD, node, NULL, GF_OK, GF_TRUE);
gf_node_dirty_parents(node);
gf_mo_event_target_remove_by_node(mo, node);
In such a case we would end up in a deadlock - this needs urgent fixing ...
*/
-
if (ODID) {
/*if no parent we must process the url change as we may not be traversed later on (not in the scene tree)*/
if (gf_node_get_parent(node, 0)==NULL) {
if (!scene) return;
mo = scene->root_od ? scene->root_od->mo : NULL;
- gf_scene_notify_event(scene, GF_EVENT_UNLOAD, n, NULL, GF_OK);
+ gf_scene_notify_event(scene, GF_EVENT_UNLOAD, n, NULL, GF_OK, GF_TRUE);
if (!mo) return;
gf_mo_event_target_remove_by_node(mo, n);
}
}
+ /*if not attached return (attaching the graph cannot be done in render since render is not called while unattached :) */
+ if (!scene->graph_attached) {
+ /*just like protos, we must invalidate parent graph until attached*/
+ gf_node_dirty_set(n, 0, GF_TRUE);
+ return;
+ }
+ /*clear dirty flags for any sub-inlines, bitmaps or protos*/
+ gf_node_dirty_clear(n, 0);
+
+ current_url = scene->current_url;
+ scene->current_url = & ((M_Inline*)n)->url;
+ gf_sc_traverse_subscene(scene->root_od->term->compositor, n, scene->graph, rs);
+ scene->current_url = current_url;
+
+ //do we have to restart for next frame ? If so let's do it
gf_inline_check_restart(scene);
/*if we need to restart, shutdown graph and do it*/
return;
}
- /*if not attached return (attaching the graph cannot be done in render since render is not called while unattached :) */
- if (!scene->graph_attached) {
- /*just like protos, we must invalidate parent graph until attached*/
- gf_node_dirty_set(n, 0, GF_TRUE);
- return;
- }
- /*clear dirty flags for any sub-inlines, bitmaps or protos*/
- gf_node_dirty_clear(n, 0);
-
- current_url = scene->current_url;
- scene->current_url = & ((M_Inline*)n)->url;
- gf_sc_traverse_subscene(scene->root_od->term->compositor, n, scene->graph, rs);
- scene->current_url = current_url;
}
sprintf(szMsg, "!! UDP down (%s) - Retrying with TCP !!\n", message);
gf_term_message(term, service->url, szMsg, GF_IP_NETWORK_FAILURE);
- /*reload scene*/
+ /*reload scene - FIXME this shall work on inline nodes, not on the root !*/
if (term->reload_url) gf_free(term->reload_url);
term->reload_state = 1;
term->reload_url = gf_strdup(term->root_scene->root_od->net_service->url);
evt.connect.is_connected = 0;
gf_term_send_event(term, &evt);
} else {
- if (root->subscene) gf_scene_notify_event(root->subscene, GF_EVENT_SCENE_ATTACHED, NULL, NULL, err);
+ if (root->subscene) gf_scene_notify_event(root->subscene, GF_EVENT_SCENE_ATTACHED, NULL, NULL, err, GF_FALSE);
/*try to reinsert OD for VRML/X3D with multiple URLs:
1- first remove from parent scene without destroying object, this will trigger a re-setup
if other URLs are present
if (!no_scene_check && scene->is_dynamic_scene) gf_scene_regenerate(scene);
}
+static void gather_buffer_level(GF_ObjectManager *odm, GF_ClientService *service, GF_NetworkCommand *com, s32 *max_buffer_time)
+{
+ u32 j, count = gf_list_count(odm->channels);
+ for (j=0; j<count; j++) {
+ GF_Channel *ch = (GF_Channel *)gf_list_get(odm->channels, j);
+ if (ch->service != service) continue;
+ if (ch->es_state != GF_ESM_ES_RUNNING) continue;
+ if (com->base.on_channel && (com->base.on_channel != ch)) continue;
+ if (/*!ch->MaxBuffer || */ch->dispatch_after_db || ch->bypass_sl_and_db || ch->IsEndOfStream) continue;
+ //perform buffer management only on base layer -this is because we don't signal which ESs are on/off in the underlying service ...
+ if (ch->esd->dependsOnESID) continue;
+ if (ch->MaxBuffer>com->buffer.max) com->buffer.max = ch->MaxBuffer;
+ if (ch->MinBuffer<com->buffer.min) com->buffer.min = ch->MinBuffer;
+ if (ch->IsClockInit) {
+ if (ch->BufferTime > (s32) *max_buffer_time)
+ *max_buffer_time = ch->BufferTime;
+
+ /*if we don't have more units (compressed or not) than requested max for the composition memory, request more data*/
+ if (ch->odm->codec && ch->odm->codec->CB && (odm->codec->CB->UnitCount + ch->AU_Count <= odm->codec->CB->Capacity)) {
+ com->buffer.occupancy = 0;
+ } else if ( (u32) ch->BufferTime < com->buffer.occupancy) {
+ com->buffer.occupancy = ch->BufferTime;
+ }
+ } else {
+ com->buffer.occupancy = 0;
+ }
+ }
+}
+
static void term_on_command(void *user_priv, GF_ClientService *service, GF_NetworkCommand *com, GF_Err response)
{
GF_Channel *ch;
if (com->command_type==GF_NET_BUFFER_QUERY) {
GF_List *od_list;
- u32 i;
+ u32 i, max_buffer_time;
GF_ObjectManager *odm;
com->buffer.max = 0;
com->buffer.min = com->buffer.occupancy = (u32) -1;
/*get exclusive access to media scheduler, to make sure ODs are not being
manipulated*/
gf_mx_p(term->mm_mx);
+ max_buffer_time=0;
if (!gf_list_count(od_list))
GF_LOG(GF_LOG_WARNING, GF_LOG_MEDIA, ("[ODM] No object manager found for the scene (URL: %s), buffer occupancy will remain unchanged\n", service->url));
i=0;
while ((odm = (GF_ObjectManager*)gf_list_enum(od_list, &i))) {
- u32 j, count;
if (!odm->codec) continue;
- count = gf_list_count(odm->channels);
- for (j=0; j<count; j++) {
- GF_Channel *ch = (GF_Channel *)gf_list_get(odm->channels, j);
- if (ch->service != service) continue;
- if (ch->es_state != GF_ESM_ES_RUNNING) continue;
- if (com->base.on_channel && (com->base.on_channel != ch)) continue;
- if (/*!ch->MaxBuffer || */ch->dispatch_after_db || ch->bypass_sl_and_db || ch->IsEndOfStream) continue;
- //perform buffer management only on base layer -this is because we don't signal which ESs are on/off in the underlying service ...
- if (ch->esd->dependsOnESID) continue;
- if (ch->MaxBuffer>com->buffer.max) com->buffer.max = ch->MaxBuffer;
- if (ch->MinBuffer<com->buffer.min) com->buffer.min = ch->MinBuffer;
- if (ch->IsClockInit) {
- /*if we don't have more units (compressed or not) than requested max for the composition memory, request more data*/
- if (ch->odm->codec && ch->odm->codec->CB && (odm->codec->CB->UnitCount + ch->AU_Count <= odm->codec->CB->Capacity)) {
- com->buffer.occupancy = 0;
-// com->buffer.occupancy = ch->BufferTime;
- } else if ( (u32) ch->BufferTime < com->buffer.occupancy) {
- com->buffer.occupancy = ch->BufferTime;
- }
- }
- }
+ gather_buffer_level(odm, service, com, &max_buffer_time);
}
gf_mx_v(term->mm_mx);
-// fprintf(stderr, "Buffer occupancy %d\n", com->buffer.occupancy);
if (com->buffer.occupancy==(u32) -1) com->buffer.occupancy = 0;
+
+ //in bench mode return the 1 if one of the buffer is full (eg sleep until all buffers are not full), 0 otherwise
+ if (term->bench_mode) {
+ com->buffer.occupancy = (max_buffer_time>com->buffer.max) ? 2 : 0;
+ com->buffer.max = 1;
+ com->buffer.min = 0;
+ }
return;
}
if (com->command_type==GF_NET_SERVICE_INFO) {
return;
}
if (com->command_type==GF_NET_SERVICE_MEDIA_CAP_QUERY) {
- gf_sc_get_av_caps(term->compositor, &com->mcaps.width, &com->mcaps.height, &com->mcaps.bpp, &com->mcaps.channels, &com->mcaps.sample_rate);
+ gf_sc_get_av_caps(term->compositor, &com->mcaps.width, &com->mcaps.height, &com->mcaps.display_bit_depth, &com->mcaps.audio_bpp, &com->mcaps.channels, &com->mcaps.sample_rate);
return;
}
+ if (com->command_type==GF_NET_ASSOCIATED_CONTENT_LOCATION) {
+ GF_Scene *scene;
+ if (service->owner->subscene) {
+ scene = service->owner->subscene;
+ } else if (service->owner->parentscene) {
+ scene = service->owner->parentscene;
+ }
+ gf_scene_register_associated_media(scene, &com->addon_info);
+ return;
+ }
+ if (com->command_type==GF_NET_ASSOCIATED_CONTENT_TIMING) {
+ GF_Scene *scene;
+ if (service->owner->subscene) {
+ scene = service->owner->subscene;
+ } else if (service->owner->parentscene) {
+ scene = service->owner->parentscene;
+ }
+ gf_scene_notify_associated_media_timeline(scene, &com->addon_time);
+ return;
+ }
+
+
if (!com->base.on_channel) return;
ch = gf_term_get_channel(service, com->base.on_channel);
gf_es_buffer_off(ch);
break;
case GF_NET_CHAN_BUFFER:
- //lock channel before updating buffer info, otherwise we may collect wrong HTML media info
- gf_mx_p(ch->mx);
- ch->BufferTime = com->buffer.occupancy;
- ch->MaxBuffer = com->buffer.max;
+ ch->BufferTime = 100 * com->buffer.occupancy / com->buffer.max;
gf_scene_buffering_info(ch->odm->parentscene ? ch->odm->parentscene : ch->odm->subscene);
- ch->MaxBuffer = 0;
- gf_mx_v(ch->mx);
break;
default:
return;
}
-static GF_InputService *gf_term_can_handle_service(GF_Terminal *term, const char *url, const char *parent_url, Bool no_mime_check, char **out_url, GF_Err *ret_code, GF_DownloadSession **the_session)
+static GF_InputService *gf_term_can_handle_service(GF_Terminal *term, const char *url, const char *parent_url, Bool no_mime_check, char **out_url, GF_Err *ret_code, GF_DownloadSession **the_session, char **out_mime_type)
{
u32 i;
GF_Err e;
mime_type = NULL;
GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("[Terminal] Looking for plugin for URL %s\n", url));
*out_url = NULL;
+ *out_mime_type = NULL;
sURL = NULL;
if (!url || !strncmp(url, "\\\\", 2) ) {
(*ret_code) = GF_URL_ERROR;
const char *sPlug = gf_cfg_get_key(term->user->config, "MimeTypes", mime_type);
GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("[Terminal] Mime type found: %s\n", mime_type));
if (!sPlug) {
- gf_free(mime_type);
+ *out_mime_type = mime_type;
mime_type=NULL;
}
if (sPlug) sPlug = strrchr(sPlug, '"');
if (the_session && *the_session) {
gf_dm_sess_del(*the_session);
}
+ if (mime_type) gf_free(mime_type);
+ mime_type = NULL;
+ if (*out_mime_type) gf_free(*out_mime_type);
+ *out_mime_type = NULL;
} else {
*out_url = sURL;
GF_LOG(GF_LOG_INFO, GF_LOG_MEDIA, ("[Terminal] Found input plugin %s for URL %s (%s)\n", ifce->module_name, sURL, mime_type ? mime_type : "no mime type"));
}
if (mime_type)
- gf_free(mime_type);
- mime_type = NULL;
+ *out_mime_type = mime_type;
return ifce;
}
{
GF_DownloadSession *download_session = NULL;
char *sURL;
+ char *mime;
GF_ClientService *serv;
- GF_InputService *ifce = gf_term_can_handle_service(term, url, parent_url, 0, &sURL, ret_code, &download_session);
+ GF_InputService *ifce = gf_term_can_handle_service(term, url, parent_url, 0, &sURL, ret_code, &download_session, &mime);
if (!ifce) return NULL;
GF_SAFEALLOC(serv, GF_ClientService);
serv->owner = owner;
serv->ifce = ifce;
serv->url = sURL;
+ serv->mime = mime;
serv->Clocks = gf_list_new();
serv->dnloads = gf_list_new();
serv->pending_service_session = download_session;
GF_InputService *ifce;
GF_Err e;
char *sURL;
+ char *mime=NULL;
char *parent_url = NULL;
if (use_parent_url && term->root_scene) parent_url = term->root_scene->root_od->net_service->url;
- ifce = gf_term_can_handle_service(term, fileName, parent_url, no_mime_check, &sURL, &e, NULL);
+ ifce = gf_term_can_handle_service(term, fileName, parent_url, no_mime_check, &sURL, &e, NULL, &mime);
if (!ifce) return 0;
gf_modules_close_interface((GF_BaseInterface *) ifce);
gf_free(sURL);
+ if (mime) gf_free(mime);
return 1;
}
gf_modules_close_interface((GF_BaseInterface *)ns->ifce);
gf_free(ns->url);
+ gf_free(ns->mime);
/*delete all the clocks*/
if (odm->codec->CB) {
info->cb_max_count = odm->codec->CB->Capacity;
info->cb_unit_count = odm->codec->CB->UnitCount;
+ if (odm->codec->direct_vout) {
+ info->direct_video_memory = 1;
+ }
}
}
#include <gpac/internal/terminal_dev.h>
+#include <gpac/internal/compositor_dev.h>
#include <gpac/constants.h>
#include "media_memory.h"
#include "media_control.h"
return GF_OK;
}
+/*decides whether an object declared out of band (no time control / not in OD stream) should be
+ automatically selected for playback. Scene/image codecs and objects in non-dynamic scenes are
+ always selected; in a dynamic scene, selection is refused when another object of the same
+ stream type has already been auto-started*/
+static Bool gf_odm_should_auto_select(GF_ObjectManager *odm)
+{
+ u32 i, count;
+ if (gf_codec_is_scene_or_image(odm->codec)) return GF_TRUE;
+
+ /*guard NULL parentscene before walking its resources (previous code dereferenced it below)*/
+ if (!odm->parentscene || !odm->parentscene->is_dynamic_scene) return GF_TRUE;
+
+ count = gf_list_count(odm->parentscene->resources);
+ for (i=0; i<count; i++) {
+ GF_ObjectManager *an_odm = gf_list_get(odm->parentscene->resources, i);
+ if (an_odm==odm) continue;
+ if (!an_odm->codec) continue;
+ if (an_odm->codec->type != odm->codec->type) continue;
+ //same type - if the first one has been automatically activated, do not activate this one
+ if (an_odm->state == GF_ODM_STATE_PLAY) return GF_FALSE;
+ }
+ return GF_TRUE;
+}
/*connection of OD and setup of streams. The streams are not requested if the OD
if (odm->parentscene) {
GF_Event evt;
- gf_scene_setup_object(odm->parentscene, odm);
+ if (!odm->scalable_addon)
+ gf_scene_setup_object(odm->parentscene, odm);
/*setup node decoder*/
if (odm->mo && odm->codec && odm->codec->decio && (odm->codec->decio->InterfaceType==GF_NODE_DECODER_INTERFACE) ) {
GF_Node *n = gf_event_target_get_node(gf_mo_event_target_get(odm->mo, 0));
if (n) ndec->AttachNode(ndec, n);
- /*not clear in the spec how the streams attached to AFC are started - default to "right now"*/
+ /*not clear in the spec how the streams attached to AFX are started - default to "right now"*/
gf_odm_start(odm, 0);
}
have to wait for an entire image carousel period to start filling the buffers, which is sub-optimal
we also force a prefetch for object declared outside the OD stream to make sure we don't loose any data before object declaration and play
as can be the case with MPEG2 TS (first video packet right after the PMT) - this should be refined*/
- else if ( ((odm->flags & GF_ODM_NO_TIME_CTRL) || (odm->flags & GF_ODM_NOT_IN_OD_STREAM)) && (odm->parentscene->selected_service_id == odm->OD->ServiceID)) {
+ else if ( ((odm->flags & GF_ODM_NO_TIME_CTRL) || (odm->flags & GF_ODM_NOT_IN_OD_STREAM)) && gf_odm_should_auto_select(odm) && (odm->parentscene->selected_service_id == odm->OD->ServiceID)) {
Bool force_play = GF_FALSE;
if (odm->state==GF_ODM_STATE_STOP) {
odm->flags |= GF_ODM_PREFETCH;
else if ((odm->state==GF_ODM_STATE_PLAY) && (gf_list_del_item(odm->term->media_queue, odm)>=0) ) {
force_play = GF_TRUE;
}
+
if (force_play) {
odm->flags |= GF_ODM_INITIAL_BROADCAST_PLAY;
GF_LOG(GF_LOG_INFO, GF_LOG_MEDIA, ("[ODM%d] Inserted from broadcast or input service - forcing play\n", odm->OD->objectDescriptorID));
if (odm->OD_PL) {
gf_scene_select_object(odm->parentscene, odm);
odm->OD_PL = 0;
+ gf_term_lock_net(odm->term, GF_FALSE);
+ return;
+ }
+
+ if (odm->addon) {
+ gf_term_lock_net(odm->term, GF_FALSE);
+
+ if (! odm->addon->scalable_type) {
+ gf_scene_select_object(odm->parentscene, odm);
+ }
+ return;
}
+
if (odm->parentscene==odm->term->root_scene) {
gf_term_lock_net(odm->term, GF_FALSE);
evt.type = GF_EVENT_STREAMLIST;
gf_term_send_event(odm->term,&evt);
-
- gf_term_lock_net(odm->term, GF_TRUE);
+ return;
}
}
GF_EXPORT
GF_Err gf_odm_setup_es(GF_ObjectManager *odm, GF_ESD *esd, GF_ClientService *serv, GF_MediaObject *sync_ref)
{
- GF_CodecCapability cap;
GF_Channel *ch;
GF_Clock *ck;
GF_List *ck_namespace;
/*we have a media or user-specific codec...*/
if (!odm->codec) {
odm->codec = gf_codec_new(odm, esd, (esd->decoderConfig->streamType==GF_STREAM_VISUAL) ? odm->Visual_PL : odm->Audio_PL, &e);
- if (!e) gf_term_add_codec(odm->term, odm->codec);
+ if (!e && odm->codec) gf_term_add_codec(odm->term, odm->codec);
}
dec = odm->codec;
break;
}
dec = odm->subscene->scene_codec;
} else {
- /*this is a bit tricky: the scene decoder needs to ba called with the dummy streams of this
+ /*this is a bit tricky: the scene decoder needs to be called with the dummy streams of this
object, so we associate the main decoder to this object*/
odm->codec = dec = gf_codec_use_codec(odm->parentscene->scene_codec, odm);
gf_term_add_codec(odm->term, odm->codec);
default:
if (!odm->codec) {
odm->codec = gf_codec_new(odm, esd, odm->OD_PL, &e);
- if (!e) gf_term_add_codec(odm->term, odm->codec);
+ if (!e && odm->codec) gf_term_add_codec(odm->term, odm->codec);
}
dec = odm->codec;
break;
}
- /*if we have a decoder, set up the channel and co.*/
- if (!dec) {
- if (e) {
- gf_es_del(ch);
- return e;
- }
+ if (!dec && e) {
+ gf_es_del(ch);
+ return e;
}
/*setup scene decoder*/
- if (dec->decio && (dec->decio->InterfaceType==GF_SCENE_DECODER_INTERFACE) ) {
+ if (dec && dec->decio && (dec->decio->InterfaceType==GF_SCENE_DECODER_INTERFACE) ) {
GF_SceneDecoder *sdec = (GF_SceneDecoder *) dec->decio;
scene = odm->subscene ? odm->subscene : odm->parentscene;
if (sdec->AttachScene) {
}
}
}
- {
+
+ ch->es_state = GF_ESM_ES_SETUP;
+ ch->odm = odm;
+
+ if (dec) {
GF_CodecCapability cap;
cap.CapCode = GF_CODEC_RAW_MEDIA;
gf_codec_get_capability(dec, &cap);
dec->flags |= GF_ESM_CODEC_IS_RAW_MEDIA;
dec->process = gf_codec_process_private_media;
}
- }
- ch->es_state = GF_ESM_ES_SETUP;
- ch->odm = odm;
-
- /*get media padding BEFORE channel setup, since we use it on channel connect ack*/
- if (dec) {
+ /*get media padding BEFORE channel setup, since we use it on channel connect ack*/
cap.CapCode = GF_CODEC_PADDING_BYTES;
gf_codec_get_capability(dec, &cap);
ch->media_padding_bytes = cap.cap.valueInt;
cs->dec = dec;
/*HACK: special case when OD resources are statically described in the ESD itself (ISMA streaming)*/
- if ((ch->esd->decoderConfig->streamType==GF_STREAM_OD) && strstr(ch->esd->URLString, "data:application/mpeg4-od-au;") )
+ if (dec && (ch->esd->decoderConfig->streamType==GF_STREAM_OD) && strstr(ch->esd->URLString, "data:application/mpeg4-od-au;") )
dec->flags |= GF_ESM_CODEC_IS_STATIC_OD;
gf_term_lock_net(odm->term, 1);
}
/*insert channel*/
- if (dec) gf_list_insert(ch->odm->channels, ch, 0);
+ gf_list_insert(ch->odm->channels, ch, 0);
if (ch->service) {
ch->es_state = GF_ESM_ES_WAIT_FOR_ACK;
gf_term_message(ch->odm->term, ch->service->url, "Audio Setup failed", e);
break;
}
- gf_list_rem(ch->odm->channels, 0);
+ gf_list_del_item(ch->odm->channels, ch);
/*disconnect*/
ch->service->ifce->DisconnectChannel(ch->service->ifce, ch);
if (ch->esd->URLString) {
GF_LOG(GF_LOG_INFO, GF_LOG_MEDIA, ("[ODM%d] CH%d: At OTB %u starting channel\n", odm->OD->objectDescriptorID, ch->esd->ESID, gf_clock_time(ch->clock)));
}
skip_register = 0;
+
+ }
+ //wait for end of setup
+ else if (odm->state==GF_ODM_STATE_IN_SETUP) {
+ media_queue_state=0;
+ skip_register = 0;
}
/*object is already started - only reinsert in media queue if this function was called on an object already in the queue*/
else {
ck_time = (Double) (s64) odm->media_start_time;
ck_time /= 1000;
}
+ else if (odm->parentscene && odm->parentscene->root_od->media_start_time && !ch->clock->clock_init) {
+ ck_time = (Double) (s64) odm->parentscene->root_od->media_start_time;
+ ck_time /= 1000;
+ }
/*play from current time*/
else {
ck_time = gf_clock_time(ch->clock);
+ if (odm->parentscene && odm->parentscene->root_od->addon) {
+ ck_time = gf_scene_adjust_time_for_addon(odm->parentscene, (u32) ck_time, odm->parentscene->root_od->addon);
+
+ if (odm->scalable_addon) {
+ //this is a scalable extension to an object in the parent scene
+ gf_scene_select_scalable_addon(odm->parentscene->root_od->parentscene, odm);
+ }
+
+ }
ck_time /= 1000;
+
/*handle initial start - MPEG-4 is a bit annoying here, streams are not started through OD but through
scene nodes. If the stream runs on the BIFS/OD clock, the clock is already started at this point and we're
sure to get at least a one-frame delay in PLAY, so just remove it - note we're generous but this shouldn't hurt*/
/*little opt for image codecs: don't actually stop the OD*/
if (!force_close && odm->codec && odm->codec->CB && !odm->codec->CB->no_allocation) {
- if (odm->codec->CB->Capacity==1) return;
+ if (odm->codec->CB->Capacity==1) {
+ gf_cm_abort_buffering(odm->codec->CB);
+ return;
+ }
}
/*if raw media, stop all channels before sending stop command to network, to avoid new media frames to be set
void gf_odm_signal_eos(GF_ObjectManager *odm)
{
- //FIXME make this work with gui ?
- if (odm->parentscene && (odm->parentscene != odm->term->root_scene) ) return;
- if (gf_term_check_end_of_scene(odm->term, 0)) {
- GF_Event evt;
- evt.type = GF_EVENT_EOS;
- gf_term_send_event(odm->term, &evt);
+ if (odm->parentscene && (odm->parentscene != odm->term->root_scene) ) {
+ GF_ObjectManager *root = odm->parentscene->root_od;
+ Bool is_over = 0;
+
+ if (!gf_scene_check_clocks(root->net_service, root->subscene)) return;
+ if (root->subscene->is_dynamic_scene)
+ is_over = 1;
+ else
+ is_over = gf_sc_is_over(odm->term->compositor, root->subscene->graph);
+
+ if (is_over) {
+ gf_term_service_media_event(root, GF_EVENT_MEDIA_ENDED);
+ }
+ } else {
+ if (gf_term_check_end_of_scene(odm->term, 0)) {
+ GF_Event evt;
+ evt.type = GF_EVENT_EOS;
+ gf_term_send_event(odm->term, &evt);
+ }
}
}
#include "input_sensor.h"
#include "media_memory.h"
+void gf_scene_reset_addons(GF_Scene *scene);
+
GF_EXPORT
Double gf_scene_get_time(void *_is)
{
tmp->resources = gf_list_new();
tmp->scene_objects = gf_list_new();
tmp->extra_scenes = gf_list_new();
+ tmp->declared_addons = gf_list_new();
/*init inline scene*/
if (parentScene) {
tmp->graph = gf_sg_new_subscene(parentScene->graph);
gf_list_del(scene->keynavigators);
#endif
+ gf_list_del(scene->declared_addons);
+
if (scene->audio_url.url) gf_free(scene->audio_url.url);
if (scene->visual_url.url) gf_free(scene->visual_url.url);
if (scene->text_url.url) gf_free(scene->text_url.url);
gf_sc_set_scene(scene->root_od->term->compositor, NULL);
}
+ gf_scene_reset_addons(scene);
+
/*release the scene - at this stage, we no longer have any node stack refering to our media objects */
if (dec && dec->ReleaseScene) dec->ReleaseScene(dec);
+ gf_sc_node_destroy(scene->root_od->term->compositor, NULL, scene->graph);
gf_sg_reset(scene->graph);
scene->graph_attached = 0;
-void gf_scene_notify_event(GF_Scene *scene, u32 event_type, GF_Node *n, void *_event, GF_Err code)
+void gf_scene_notify_event(GF_Scene *scene, u32 event_type, GF_Node *n, void *_event, GF_Err code, Bool no_queueing)
{
/*fire resize event*/
#ifndef GPAC_DISABLE_SVG
GF_Node *root;
u32 i, count;
u32 w, h;
- GF_DOM_Event evt, *event;
- event = (GF_DOM_Event *)_event;
+ GF_DOM_Event evt, *dom_event;
+ dom_event = (GF_DOM_Event *)_event;
if (!scene) return;
root = gf_sg_get_root_node(scene->graph);
- if (!event) {
+ if (!dom_event) {
memset(&evt, 0, sizeof(GF_DOM_Event));
- event = &evt;
+ dom_event = &evt;
w = h = 0;
gf_sg_get_scene_size_info(scene->graph, &w, &h);
evt.type = event_type;
evt.screen_rect.width = INT2FIX(w);
evt.screen_rect.height = INT2FIX(h);
+ evt.key_flags = scene->is_dynamic_scene;
if (root) {
#ifndef GPAC_DISABLE_VRML
switch (gf_node_get_tag(root)) {
evt.error_state = code;
}
if (n) {
- gf_dom_event_fire(n, event);
+ if (no_queueing) {
+ gf_dom_event_fire(n, dom_event);
+ } else {
+ gf_sc_queue_dom_event(scene->root_od->term->compositor, n, dom_event);
+ }
} else {
- if (root) gf_dom_event_fire(root, event);
+ if (root) {
+ if (no_queueing) {
+ gf_dom_event_fire(root, dom_event);
+ } else {
+ gf_sc_queue_dom_event(scene->root_od->term->compositor, root, dom_event);
+ }
+ }
count=scene->root_od->mo ? gf_mo_event_target_count(scene->root_od->mo) : 0;
for (i=0;i<count; i++) {
- gf_dom_event_fire(gf_event_target_get_node(gf_mo_event_target_get(scene->root_od->mo, i)), event);
+ GF_Node *an = gf_event_target_get_node(gf_mo_event_target_get(scene->root_od->mo, i));
+ if (no_queueing) {
+ gf_dom_event_fire(an, dom_event);
+ } else {
+ gf_sc_queue_dom_event(scene->root_od->term->compositor, an, dom_event);
+ }
}
}
#endif
gf_sc_set_size(scene->root_od->term->compositor, w, h);
}
/*trigger a scene attach event*/
- gf_scene_notify_event(scene, GF_EVENT_SCENE_ATTACHED, NULL, NULL, GF_OK);
+ gf_scene_notify_event(scene, GF_EVENT_SCENE_ATTACHED, NULL, NULL, GF_OK, GF_FALSE);
}
}
if (obj->odm) {
Bool can_reuse = GF_TRUE;
Bool timeline_locked = (obj->odm->flags & GF_ODM_INHERIT_TIMELINE) ? GF_TRUE : GF_FALSE;
- if (timeline_locked != lock_timelines)
+
+ //addon object always share the timeline
+ if (obj->odm->addon || obj->odm->parentscene->root_od->addon)
+ timeline_locked = lock_timelines = 1;
+
+ if (timeline_locked != lock_timelines)
continue;
gf_term_lock_media_queue(scene->root_od->term, GF_TRUE);
{
MFURL url;
M_Transform2D *tr;
+ M_Layer2D *layer;
GF_MediaObject *mo;
u32 w, h, v_w, v_h;
if (!scene->visual_url.OD_ID && !scene->visual_url.url) return;
tr->translation.y = INT2FIX((s32) (h - v_h)) / 2;
gf_node_dirty_set((GF_Node *)tr, 0, 0);
+
+ tr = (M_Transform2D *) gf_sg_find_node_by_name(scene->graph, "ADDON_TRANS");
+ if (!tr) return;
+ tr->translation.x = INT2FIX(v_w) / 4;
+ tr->translation.y = INT2FIX(v_h) / 4;
+ gf_node_dirty_set((GF_Node *)tr, 0, 0);
+
+ layer = (M_Layer2D *) gf_sg_find_node_by_name(scene->graph, "ADDON_LAYER");
+ if (!layer) return;
+ layer->size.x = INT2FIX(v_w) / 2;
+ layer->size.y = INT2FIX(v_h) / 2;
+ gf_node_dirty_set((GF_Node *)layer, 0, 0);
+
+
if (scene->root_od->term->root_scene == scene) {
//if (scene->graph_attached) gf_sc_set_scene(scene->root_od->term->compositor, NULL);
//gf_sc_set_scene(scene->root_od->term->compositor, scene->graph);
u32 i=0;
GF_ObjectManager *odm = NULL;
while ((odm = (GF_ObjectManager*)gf_list_enum(scene->resources, &i))) {
+ if (odm->scalable_addon)
+ continue;
+
if (type==GF_STREAM_TEXT) {
if (!odm->codec || ((odm->codec->type!=type) && (odm->codec->type!=GF_STREAM_ND_SUBPIC))) continue;
}
else if (type==GF_STREAM_SCENE) {
if (!odm->subscene || (!odm->subscene->scene_codec && !odm->subscene->is_dynamic_scene) ) continue;
+
+ if (odm->subscene->root_od->addon)
+ continue;
}
else {
if (!odm->codec || (odm->codec->type!=type)) continue;
M_MovieTexture *mt;
M_AnimationStream *as;
M_Inline *dims;
+ M_Transform2D *addon_tr;
+ M_Layer2D *addon_layer;
+ M_Inline *addon_scene;
if (scene->is_dynamic_scene != 1) return;
/*3GPP DIMS streams controlled */
n1 = gf_sg_get_root_node(scene->graph);
- dims = (M_Inline *) is_create_node(scene->graph, TAG_MPEG4_Inline, "DYN_SCENE");
+ dims = (M_Inline *) is_create_node(scene->graph, TAG_MPEG4_Inline, "DIMS_SCENE");
gf_node_list_add_child( &((GF_ParentNode *)n1)->children, (GF_Node*)dims);
gf_node_register((GF_Node *)dims, n1);
+
+ /*Media addon scene*/
+ n1 = gf_sg_get_root_node(scene->graph);
+ addon_tr = (M_Transform2D *) is_create_node(scene->graph, TAG_MPEG4_Transform2D, "ADDON_TRANS");
+ gf_node_list_add_child( &((GF_ParentNode *)n1)->children, (GF_Node*)addon_tr);
+ gf_node_register((GF_Node *)addon_tr, n1);
+
+ addon_layer = (M_Layer2D *) is_create_node(scene->graph, TAG_MPEG4_Layer2D, "ADDON_LAYER");
+ gf_node_list_add_child( &((GF_ParentNode *)addon_tr)->children, (GF_Node*)addon_layer);
+ gf_node_register((GF_Node *)addon_layer, (GF_Node *)addon_tr);
+
+ addon_scene = (M_Inline *) is_create_node(scene->graph, TAG_MPEG4_Inline, "ADDON_SCENE");
+ gf_node_list_add_child( &((GF_ParentNode *)addon_layer)->children, (GF_Node*)addon_scene);
+ gf_node_register((GF_Node *)addon_scene, (GF_Node *)addon_layer);
}
+
ac = (M_AudioClip *) gf_sg_find_node_by_name(scene->graph, "DYN_AUDIO");
set_media_url(scene, &scene->audio_url, (GF_Node*)ac, &ac->url, GF_STREAM_AUDIO);
as = (M_AnimationStream *) gf_sg_find_node_by_name(scene->graph, "DYN_TEXT");
set_media_url(scene, &scene->text_url, (GF_Node*)as, &as->url, GF_STREAM_TEXT);
- dims = (M_Inline *) gf_sg_find_node_by_name(scene->graph, "DYN_SCENE");
+ dims = (M_Inline *) gf_sg_find_node_by_name(scene->graph, "DIMS_SCENE");
set_media_url(scene, &scene->dims_url, (GF_Node*)dims, &dims->url, GF_STREAM_SCENE);
gf_sc_lock(scene->root_od->term->compositor, 0);
IS_UpdateVideoPos(scene);
} else {
scene->graph_attached = 1;
- gf_scene_notify_event(scene, GF_EVENT_SCENE_ATTACHED, NULL, NULL, GF_OK);
+ gf_scene_notify_event(scene, GF_EVENT_SCENE_ATTACHED, NULL, NULL, GF_OK, GF_FALSE);
gf_term_invalidate_compositor(scene->root_od->term);
}
}
char *url;
if (!scene->is_dynamic_scene || !scene->graph_attached || !odm) return;
- if (!odm->codec) return;
+ if (!odm->codec) {
+ if (!odm->addon) return;
+ }
if (odm->state) {
if (check_odm_deactivate(&scene->audio_url, odm, gf_sg_find_node_by_name(scene->graph, "DYN_AUDIO")) ) return;
if (check_odm_deactivate(&scene->text_url, odm, gf_sg_find_node_by_name(scene->graph, "DYN_TEXT") )) return;
}
+
+ if (!odm->codec && odm->subscene) {
+ M_Inline *dscene = (M_Inline *) gf_sg_find_node_by_name(scene->graph, "ADDON_SCENE");
+
+ gf_sg_vrml_field_copy(&dscene->url, &odm->mo->URLs, GF_SG_VRML_MFURL);
+ gf_node_changed((GF_Node *)dscene, NULL);
+ IS_UpdateVideoPos(scene);
+ return;
+ }
+
if (odm->codec->type == GF_STREAM_AUDIO) {
M_AudioClip *ac = (M_AudioClip *) gf_sg_find_node_by_name(scene->graph, "DYN_AUDIO");
if (!ac) return;
/*for now only allowed when no scene info*/
if (!scene->is_dynamic_scene) return;
- gf_sc_lock(scene->root_od->term->compositor, 1);
-
GF_LOG(GF_LOG_INFO, GF_LOG_COMPOSE, ("[Compositor] Changing scene size to %d x %d\n", width, height));
if (scene->root_od->term->root_scene == scene) {
IS_UpdateVideoPos(scene);
#endif
- gf_sc_lock(scene->root_od->term->compositor, 0);
-
- gf_scene_notify_event(scene, GF_EVENT_SCENE_ATTACHED, NULL, NULL, GF_OK);
+ gf_scene_notify_event(scene, GF_EVENT_SCENE_ATTACHED, NULL, NULL, GF_OK, GF_FALSE);
}
}
i=0;
while ( (odm = (GF_ObjectManager*)gf_list_enum(scene->resources, &i)) ) {
- if (odm->net_service != ns) {
+ if (odm->net_service && (odm->net_service != ns)) {
if (!gf_scene_check_clocks(odm->net_service, NULL)) return 0;
} else if (odm->codec && odm->codec->CB && !gf_cm_is_eos(odm->codec->CB) ) {
return 0;
M_Inline *inl;
#endif
GF_Event evt;
+ gf_sc_node_destroy(scene->root_od->term->compositor, NULL, scene->graph);
gf_sg_reset(scene->graph);
scene->force_single_timeline = 1;
evt.connect.is_connected = 1;
gf_term_send_event(scene->root_od->term, &evt);
}
+
+/*destroys an addon descriptor: optionally disconnects its root object first, then frees the
+ pending URL (if the media was never loaded) and the addon itself.
+ NOTE(review): callers must clear any remaining reference (e.g. scene->active_addon) afterwards,
+ the pointer is dangling on return*/
+void scene_reset_addon(GF_AddonMedia *addon, Bool disconnect)
+{
+ if (disconnect && addon->root_od) gf_odm_disconnect(addon->root_od, 1);
+ if (addon->url) gf_free(addon->url);
+ gf_free(addon);
+}
+
+/*destroys all addons declared on the scene. The active addon (which may still be listed in
+ declared_addons) is skipped during the list walk and destroyed last, then the dangling
+ active_addon pointer is cleared to prevent use-after-free on subsequent resets*/
+void gf_scene_reset_addons(GF_Scene *scene)
+{
+ while (gf_list_count(scene->declared_addons)) {
+ GF_AddonMedia *addon = gf_list_last(scene->declared_addons);
+ gf_list_rem_last(scene->declared_addons);
+ if (addon==scene->active_addon) continue;
+
+ scene_reset_addon(addon, 0);
+ }
+ if (scene->active_addon) scene_reset_addon(scene->active_addon, 0);
+ /*scene_reset_addon frees the addon - do not keep a dangling pointer*/
+ scene->active_addon = NULL;
+}
+
+/*instantiates the media object for an enabled addon, using its external URL. On success,
+ ownership of the URL string moves to the media object framework and the addon is linked
+ to the resulting ODM. No-op if the addon was refused by the user (enabled==0)*/
+static void load_associated_media(GF_Scene *scene, GF_AddonMedia *addon)
+{
+ GF_MediaObject *mo;
+ MFURL url;
+ SFURL sfurl;
+
+ if (!addon->enabled) return;
+
+ /*build a single-entry URL list pointing at the addon's external resource*/
+ url.count=1;
+ url.vals = &sfurl;
+ url.vals[0].OD_ID = GF_MEDIA_EXTERNAL_ID;
+ url.vals[0].url = (char *)addon->url;
+
+ //we may need to change the object type once we have more ideas what the external resource is about.
+ //By default we start with scene
+ //we force the timeline of the addon to be locked with the main scene
+ mo = gf_scene_get_media_object(scene, &url, GF_MEDIA_OBJECT_SCENE, GF_TRUE);
+
+ if (!mo) return;
+ /*URL now held by the media object - release our copy and cross-link addon and ODM.
+ NOTE(review): assumes gf_scene_get_media_object always sets mo->odm on success - confirm*/
+ gf_free(addon->url);
+ addon->url = NULL;
+ addon->root_od = mo->odm;
+ mo->odm->addon = addon;
+}
+
+/*handles an associated-content (addon) declaration coming from the network service:
+ ignores duplicates, handles the "no content" (reset) case, otherwise declares a new addon,
+ fires GF_EVENT_ADDON_DETECTED so the user can enable/disable it, and loads the media
+ immediately when its timeline is already ready (timeline_id<0)*/
+void gf_scene_register_associated_media(GF_Scene *scene, GF_AssociatedContentLocation *addon_info)
+{
+ GF_AddonMedia *addon;
+ GF_Event evt;
+ u32 i, count;
+
+ if (!scene->is_dynamic_scene) return;
+
+ /*addon already declared for this timeline: nothing to do unless a reload is requested*/
+ count = gf_list_count(scene->declared_addons);
+ for (i=0; i<count; i++) {
+ addon = gf_list_get(scene->declared_addons, i);
+ if (addon->timeline_id==addon_info->timeline_id) {
+ if (addon_info->reload_external) {
+ //send message to service handler
+ }
+ return;
+ }
+ }
+
+ if (!addon_info->external_URL) {
+ //NULL (nothing) will be active soon
+ if (addon_info->activation_countdown) return;
+ //otherwise reset addon
+ if (scene->active_addon) scene_reset_addon(scene->active_addon, 1);
+ scene->active_addon = NULL;
+ return;
+ }
+
+ GF_SAFEALLOC(addon, GF_AddonMedia);
+ /*GF_SAFEALLOC yields NULL on allocation failure - bail out before dereferencing*/
+ if (!addon) return;
+ addon->timeline_id = addon_info->timeline_id;
+ addon->is_splicing = addon_info->is_splicing;
+ addon->activation_time = gf_scene_get_time(scene)+addon_info->activation_countdown;
+ addon->url = gf_strdup(addon_info->external_URL);
+ addon->media_timescale = 1;
+ /*negative timeline id means no timing association will follow - timeline is ready now*/
+ addon->timeline_ready = (addon_info->timeline_id<0) ? 1 : 0;
+ if (addon->timeline_ready && !scene->active_addon) scene->active_addon = addon;
+ gf_list_add(scene->declared_addons, addon);
+
+ /*zero the event before use - GF_Event is a union, uninitialized fields must not leak*/
+ memset(&evt, 0, sizeof(GF_Event));
+ evt.type = GF_EVENT_ADDON_DETECTED;
+ evt.addon_connect.addon_url = addon->url;
+ addon->enabled = gf_term_send_event(scene->root_od->term,&evt);
+
+ if (addon->timeline_ready)
+ load_associated_media(scene, addon);
+}
+
+/*records the media timeline mapping (90kHz PTS <-> media timestamp/timescale) for the addon
+ matching addon_time->timeline_id. If that addon is not the active one, it becomes active:
+ earlier declared addons it supersedes are destroyed, and its media is loaded if needed*/
+void gf_scene_notify_associated_media_timeline(GF_Scene *scene, GF_AssociatedContentTiming *addon_time)
+{
+ GF_AddonMedia *addon = scene->active_addon;
+ //locate the active timeline
+ if (!scene->active_addon || (scene->active_addon->timeline_id!=addon_time->timeline_id)) {
+ u32 i, count = gf_list_count(scene->declared_addons);
+ for (i=0; i<count; i++) {
+ addon = gf_list_get(scene->declared_addons, i);
+ if (addon->timeline_id==addon_time->timeline_id)
+ break;
+ addon = NULL;
+ }
+ if (!addon) return;
+
+ /*discard superseded addons declared before this one (indices 0..i-1)*/
+ count = i;
+ for (i=0; i<count; i++) {
+ GF_AddonMedia *prev_addon = gf_list_get(scene->declared_addons, i);
+ //we are adding a non splicing point: discard all previously declared addons
+ if (!addon->is_splicing
+ //this is a splicing point, discard all previously declared splicing addons
+ || prev_addon->is_splicing
+ ) {
+ scene_reset_addon(prev_addon, GF_TRUE);
+ gf_list_rem(scene->declared_addons, i);
+ i--;
+ count--;
+ }
+ }
+
+ scene->active_addon = addon;
+ if (!scene->active_addon->timeline_ready) {
+ scene->active_addon->timeline_ready = GF_TRUE;
+ load_associated_media(scene, addon);
+ }
+ }
+
+ assert(scene->active_addon->timeline_id == addon_time->timeline_id);
+ scene->active_addon->media_pts = addon_time->media_pts;
+ scene->active_addon->media_timestamp = addon_time->media_timestamp;
+ scene->active_addon->media_timescale = addon_time->media_timescale;
+}
+
+/*maps a main-scene clock time (ms) into the addon's media time (ms) using the declared
+ association: subtract the association PTS (90 kHz -> ms via /90) and add the corresponding
+ media timestamp converted to ms. Returns clock_time unchanged until the timeline is ready.
+ NOTE(review): result may go through s64 but is truncated back to u32 - negative intermediate
+ values would wrap; confirm callers guarantee clock_time is past the association point*/
+u32 gf_scene_adjust_time_for_addon(GF_Scene *scene, u32 clock_time, GF_AddonMedia *addon)
+{
+ s64 media_ts_ms;
+ if (!addon->timeline_ready)
+ return clock_time;
+ assert(scene->root_od->addon);
+ assert(scene->root_od->addon==addon);
+
+ media_ts_ms = clock_time;
+
+ media_ts_ms -= (addon->media_pts/90);
+ media_ts_ms += (addon->media_timestamp*1000) / addon->media_timescale;
+ return (u32) media_ts_ms;
+}
+
+/*inverse of gf_scene_adjust_time_for_addon: maps an addon media timestamp (ms) back to the
+ main-scene time base by removing the media timestamp offset and re-adding the association
+ PTS converted from 90 kHz to ms. Timeline must be ready (asserted)*/
+u64 gf_scene_adjust_timestamp_for_addon(GF_Scene *scene, u64 orig_ts, GF_AddonMedia *addon)
+{
+ s64 media_ts_ms;
+ assert(addon->timeline_ready);
+ assert(scene->root_od->addon);
+ assert(scene->root_od->addon==addon);
+
+ media_ts_ms = orig_ts;
+ media_ts_ms -= (addon->media_timestamp*1000) / addon->media_timescale;
+ media_ts_ms += (addon->media_pts/90);
+
+ return (u64) media_ts_ms;
+}
+
+/*binds a scalable addon (e.g. SHVC enhancement layer) to its base object in the parent scene:
+ finds the base object carrying the same stream type, then configures the NALU extraction
+ mode of all addon channels to match the base layer bitstream format (annex B when the base
+ has no out-of-band decoder config, length-prefixed otherwise)*/
+void gf_scene_select_scalable_addon(GF_Scene *scene, GF_ObjectManager *odm)
+{
+ GF_NetworkCommand com;
+ GF_CodecCapability caps;
+ Bool nalu_annex_b;
+ GF_Channel *ch;
+ GF_ObjectManager *odm_base = NULL;
+ u32 i, count, mtype;
+ ch = gf_list_get(odm->channels, 0);
+ /*guard missing channel before dereferencing it*/
+ if (!ch || !ch->esd) return;
+ mtype = ch->esd->decoderConfig->streamType;
+ count = gf_list_count(scene->resources);
+ for (i=0; i<count; i++) {
+ odm_base = gf_list_get(scene->resources, i);
+ /*check codec presence BEFORE dereferencing its type (previous order crashed on codec-less objects)*/
+ if (odm_base->codec && (mtype==odm_base->codec->type))
+ break;
+ odm_base=NULL;
+ //todo check if we use compatible formats, for now we only do demos with hevc/shvc
+ }
+ if (!odm_base) return;
+
+ odm_base->scalable_odm = odm;
+
+ /*no out-of-band decoder config on the base layer means annex B (start-code) format*/
+ nalu_annex_b = 1;
+ ch = gf_list_get(odm_base->channels, 0);
+ if (ch->esd->decoderConfig->decoderSpecificInfo && ch->esd->decoderConfig->decoderSpecificInfo->dataLength)
+ nalu_annex_b = 0;
+
+ memset(&com, 0, sizeof(GF_NetworkCommand));
+ com.command_type = GF_NET_CHAN_NALU_MODE;
+ com.nalu_mode.extract_mode = nalu_annex_b ? 1 : 0;
+ count = gf_list_count(odm->channels);
+ for (i=0; i<count; i++) {
+ com.base.on_channel = ch = gf_list_get(odm->channels, i);
+ gf_term_service_command(ch->service, &com);
+ }
+
+ //signal to the base decoder that we will want full quality
+ caps.CapCode = GF_CODEC_MEDIA_SWITCH_QUALITY;
+ caps.cap.valueInt = 2;
+// odm_base->codec->decio->SetCapabilities(odm_base->codec->decio, caps);
+}
}
}
+/*scene-graph callback fired when a node is destroyed: notifies the compositor so it can drop
+ any cached state (render stacks, textures) attached to that node*/
+static void gf_term_on_node_destroyed(void *_is, GF_Node *node)
+{
+ GF_Scene *scene = (GF_Scene *)_is;
+ if (!scene) return;
+ gf_sc_node_destroy(scene->root_od->term->compositor, node, NULL);
+}
+
GF_EXPORT
void gf_term_node_callback(void *_is, u32 type, GF_Node *n, void *param)
{
case GF_SG_CALLBACK_MODIFIED:
gf_term_on_node_modified(_is, n);
break;
+ case GF_SG_CALLBACK_NODE_DESTROY:
+ gf_term_on_node_destroyed(_is, n);
+ break;
case GF_SG_CALLBACK_INIT:
gf_term_on_node_init(_is, n);
break;
/*reload term part*/
sOpt = gf_cfg_get_key(term->user->config, "Systems", "DrawLateFrames");
- if (sOpt && !stricmp(sOpt, "yes"))
- term->flags &= ~GF_TERM_DROP_LATE_FRAMES;
- else
+ if (sOpt && !stricmp(sOpt, "no"))
term->flags |= GF_TERM_DROP_LATE_FRAMES;
+ else
+ term->flags &= ~GF_TERM_DROP_LATE_FRAMES;
sOpt = gf_cfg_get_key(term->user->config, "Systems", "ForceSingleClock");
if (sOpt && !stricmp(sOpt, "yes"))
else if (!stricmp(sOpt, "Multi")) mode = GF_TERM_THREAD_MULTI;
gf_term_set_threading(term, mode);
}
+ } else {
+ gf_term_set_threading(term, GF_TERM_THREAD_SINGLE);
}
/*default data timeout is 20 sec*/
u32 val;
if (ch->service != net) continue;
- gf_mx_p(ch->mx);
-
media_event->bufferValid = GF_TRUE;
if (ch->BufferTime>0) {
if (ch->MaxBuffer) {
*min_time = 0;
*min_buffer = 0;
}
- gf_mx_v(ch->mx);
}
}
#endif
{
#ifndef GPAC_DISABLE_SVG
u32 i, count, min_buffer, min_time;
- Bool locked;
GF_DOMMediaEvent media_event;
GF_DOM_Event evt;
GF_ObjectManager *an_od;
if (!odm || !odm->net_service) return;
if (odm->mo) {
count = gf_mo_event_target_count(odm->mo);
+
+ //for dynamic scenes, check if we have listeners on the root object of the scene containing this media
+ if (!count
+ && odm->parentscene
+ && odm->parentscene->is_dynamic_scene
+ && odm->parentscene->root_od->mo
+ && (odm->parentscene->root_od->net_service==odm->net_service)
+ ) {
+ odm = odm->parentscene->root_od;
+ count = gf_mo_event_target_count(odm->mo);
+ }
if (!count) return;
+
if (0 && !(gf_node_get_dom_event_filter((GF_Node *)gf_event_target_get_node(gf_mo_event_target_get(odm->mo, 0))) & GF_DOM_EVENT_MEDIA))
return;
} else {
evt.type = event_type;
evt.bubbles = 0; /*the spec says yes but we force it to NO*/
- /*lock scene to prevent concurrent access of scene data*/
- locked = gf_mx_try_lock(odm->term->compositor->mx);
- if (!locked) return;
-
+ //these events may be triggered from any input or decoding threads. Sync processing cannot be
+ //achieved in most cases, because we may run into deadlocks, especially if the event
+ //was triggered by a service opened by JS
for (i=0; i<count; i++) {
GF_DOMEventTarget *target = (GF_DOMEventTarget *)gf_list_get(odm->mo->evt_targets, i);
- sg_fire_dom_event(target, &evt, scene->graph, NULL);
+ if (target)
+ gf_sc_queue_dom_event_on_target(scene->root_od->term->compositor, &evt, target, scene->graph);
}
if (!count) {
GF_Node *root = gf_sg_get_root_node(scene->graph);
- if (root) gf_dom_event_fire(root, &evt);
+ if (root) gf_sc_queue_dom_event(scene->root_od->term->compositor, root, &evt);
}
- gf_sc_lock(odm->term->compositor, GF_FALSE);
#endif
}
return gf_scene_execute_script(term->root_scene->graph, com);
}
+ if (!type && com && !strncmp(com, "gpac ", 5)) {
+ com += 5;
+ //new add-on
+ if (term->root_scene && !strncmp(com, "add ", 4)) {
+ GF_AssociatedContentLocation addon_info;
+ memset(&addon_info, 0, sizeof(GF_AssociatedContentLocation));
+ addon_info.external_URL = com + 4;
+ addon_info.timeline_id = -1;
+ gf_scene_register_associated_media(term->root_scene, &addon_info);
+ }
+ return GF_OK;
+ }
+
memset(&load, 0, sizeof(GF_SceneLoader));
load.localPath = gf_cfg_get_key(term->user->config, "General", "CacheDirectory");
load.flags = GF_SM_LOAD_FOR_PLAYBACK | GF_SM_LOAD_CONTEXT_READY;
the_row --;
if (flip) the_row = src->height-2 - the_row;
if (yuv_planar_type==1) {
- load_line_yv12(src->video_buffer, x_off, the_row, src->pitch_y, src_w, src->height, tmp, src->u_ptr, src->v_ptr);
+ load_line_yv12(src->video_buffer, x_off, the_row, src->pitch_y, src_w, src->height, tmp, (u8 *) src->u_ptr, (u8 *) src->v_ptr);
} else if (yuv_planar_type==3) {
- load_line_yv12_10(src->video_buffer, x_off, the_row, src->pitch_y, src_w, src->height, tmp, src->u_ptr, src->v_ptr);
+ load_line_yv12_10((char *) src->video_buffer, x_off, the_row, src->pitch_y, src_w, src->height, tmp,(u8 *) src->u_ptr, (u8 *) src->v_ptr);
} else {
- load_line_yuva(src->video_buffer, x_off, the_row, src->pitch_y, src_w, src->height, tmp, src->u_ptr, src->v_ptr, src->a_ptr);
+ load_line_yuva(src->video_buffer, x_off, the_row, src->pitch_y, src_w, src->height, tmp, (u8 *) src->u_ptr, (u8 *) src->v_ptr, (u8 *) src->a_ptr);
}
the_row = src_row - 1;
if (cmat) {
for (i=0; i<2*src_w; i++) {
u32 idx = 4*i;
- gf_cmx_apply_argb(cmat, &tmp[idx+3], &tmp[idx], &tmp[idx+1], &tmp[idx+2]);
+ gf_cmx_apply_argb(cmat, (u8 *) &tmp[idx+3], (u8 *) &tmp[idx], (u8 *) &tmp[idx+1], (u8 *) &tmp[idx+2]);
}
}
if (key) {
} else {
if (flip) the_row = src->height-2 - the_row;
if (yuv_planar_type==1) {
- load_line_yv12(src->video_buffer, x_off, the_row, src->pitch_y, src_w, src->height, tmp, src->u_ptr, src->v_ptr);
+ load_line_yv12(src->video_buffer, x_off, the_row, src->pitch_y, src_w, src->height, tmp, (u8 *) src->u_ptr, (u8 *) src->v_ptr);
} else if (yuv_planar_type==3) {
- load_line_yv12_10(src->video_buffer, x_off, the_row, src->pitch_y, src_w, src->height, tmp, src->u_ptr, src->v_ptr);
+ load_line_yv12_10(src->video_buffer, x_off, the_row, src->pitch_y, src_w, src->height, tmp, (u8 *) src->u_ptr, (u8 *) src->v_ptr);
} else {
- load_line_yuva(src->video_buffer, x_off, the_row, src->pitch_y, src_w, src->height, tmp, src->u_ptr, src->v_ptr, src->a_ptr);
+ load_line_yuva(src->video_buffer, x_off, the_row, src->pitch_y, src_w, src->height, tmp, (u8 *) src->u_ptr,(u8 *) src->v_ptr, (u8 *) src->a_ptr);
}
yuv_init = 1;
rows = flip ? tmp + src_w * 4 : tmp;
*g = INT2FIX(GF_COL_G(col)) / 255;
*b = INT2FIX(GF_COL_B(col)) / 255;
}
+
+
+
+#ifdef WIN32
+# include <intrin.h>
+# define GPAC_HAS_SSE2
+#else
+# ifdef __SSE2__
+# include <emmintrin.h>
+# define GPAC_HAS_SSE2
+# endif
+#endif
+
+#ifdef GPAC_HAS_SSE2
+
+/* SSE2 fast path: downconvert 10-bit planar YUV (16-bit samples) to 8-bit
+ * YV12 by shifting every sample right by 2 and packing 16 results per store.
+ * Must only be called by the dispatcher when w is a multiple of 32 and the
+ * source/destination rows satisfy the alignment tests there.
+ * NOTE(review): _mm_load_si128/_mm_store_si128 require 16-byte alignment —
+ * verify the dispatcher's checks are strict enough for all callers. */
+static GF_Err gf_color_write_yv12_10_to_yuv_intrin(GF_VideoSurface *vs_dst, unsigned char *pY, unsigned char *pU, unsigned char*pV, u32 src_stride, u32 src_width, u32 src_height, const GF_Window *_src_wnd)
+{
+ u32 i, j, w, h;
+ /* contiguous layout when no explicit chroma pointers are given:
+ U follows the Y plane, V follows U (each chroma plane is Y/4 in size) */
+ if (!pU) {
+ pU = pY + src_stride * src_height;
+ pV = pY + 5*src_stride * src_height/4;
+ }
+
+ if (_src_wnd) {
+ pY = pY + src_stride * _src_wnd->y + _src_wnd->x;
+ /*because of U and V downsampling by 2x2, working with odd Y offset will lead to a half-line shift between Y and UV components. We
+ therefore force an even Y offset for U and V planes.*/
+ pU = pU + (src_stride * (_src_wnd->y / 2) + _src_wnd->x) / 2;
+ pV = pV + (src_stride * (_src_wnd->y / 2) + _src_wnd->x) / 2;
+ w = _src_wnd->w;
+ h = _src_wnd->h;
+ } else {
+ w = src_width;
+ h = src_height;
+ }
+
+ if (vs_dst->pixel_format == GF_PIXEL_YV12) {
+ __m128i val1, val2, val_dst, *src1, *src2, *dst;
+ /* Y plane: each iteration reads 2x8 16-bit samples, packs to 16 bytes */
+ for (i=0; i<h; i++) {
+ src1 = (__m128i *)(pY + i*src_stride);
+ src2 = src1+1;
+ dst = (__m128i *)(vs_dst->video_buffer + i*vs_dst->pitch_y);
+
+ for (j=0; j<w/16; j++, src1+=2, src2+=2, dst++) {
+ val1 = _mm_load_si128(src1);
+ val1 = _mm_srli_epi16(val1, 2);
+ val2 = _mm_load_si128(src2);
+ val2 = _mm_srli_epi16(val2, 2);
+ val_dst = _mm_packus_epi16(val1, val2);
+ _mm_store_si128(dst, val_dst);
+ }
+ }
+
+ /* U plane: half width/height, hence w/32 iterations per row */
+ for (i=0; i<h/2; i++) {
+ src1 = (__m128i *) (pU + i*src_stride/2);
+ src2 = src1+1;
+ dst = (__m128i *)(vs_dst->video_buffer + vs_dst->pitch_y * vs_dst->height + i*vs_dst->pitch_y/2);
+
+ for (j=0; j<w/32; j++, src1+=2, src2+=2, dst++) {
+ val1 = _mm_load_si128(src1);
+ val1 = _mm_srli_epi16(val1, 2);
+ val2 = _mm_load_si128(src2);
+ val2 = _mm_srli_epi16(val2, 2);
+ val_dst = _mm_packus_epi16(val1, val2);
+ _mm_store_si128(dst, val_dst);
+ }
+ }
+
+ /* V plane: located after Y (pitch_y*height) plus U (pitch_y*height/4) */
+ for (i=0; i<h/2; i++) {
+ src1 = (__m128i *) (pV + i*src_stride/2);
+ src2 = src1+1;
+ dst = (__m128i *)(vs_dst->video_buffer + 5*vs_dst->pitch_y * vs_dst->height/4 + i*vs_dst->pitch_y/2);
+
+ for (j=0; j<w/32; j++, src1+=2, src2+=2, dst++) {
+ val1 = _mm_load_si128(src1);
+ val1 = _mm_srli_epi16(val1, 2);
+ val2 = _mm_load_si128(src2);
+ val2 = _mm_srli_epi16(val2, 2);
+ val_dst = _mm_packus_epi16(val1, val2);
+ _mm_store_si128(dst, val_dst);
+ }
+ }
+ return GF_OK;
+ }
+ return GF_NOT_SUPPORTED;
+}
+#endif
+
+
+GF_EXPORT
+/* Convert 10-bit planar YUV (16-bit samples, yv12_10) to the destination
+ * surface's 8-bit YV12 layout, shifting each sample right by 2.
+ * Dispatches to the SSE2 fast path when width and alignment permit, otherwise
+ * falls back to a scalar per-sample loop. Returns GF_NOT_SUPPORTED for
+ * destination pixel formats other than GF_PIXEL_YV12. */
+GF_Err gf_color_write_yv12_10_to_yuv(GF_VideoSurface *vs_dst, unsigned char *pY, unsigned char *pU, unsigned char*pV, u32 src_stride, u32 src_width, u32 src_height, const GF_Window *_src_wnd)
+{
+ u32 i, j, w, h;
+
+ if (_src_wnd) {
+ w = _src_wnd->w;
+ h = _src_wnd->h;
+ } else {
+ w = src_width;
+ h = src_height;
+ }
+
+
+#ifdef GPAC_HAS_SSE2
+
+#ifdef GPAC_64_BITS
+#define GFINTCAST (u64)
+#else
+#define GFINTCAST (u32)
+#endif
+
+ /*fix: the SSE2 path uses aligned loads/stores (_mm_load_si128 /
+ _mm_store_si128), which require 16-byte alignment - the original tests
+ only enforced 8-byte alignment and omitted the Y source row entirely,
+ allowing a misaligned access fault. Require mod-16 on every row base.*/
+ if ( (w%32 == 0)
+ && (GFINTCAST (pY + src_stride)%16 == 0)
+ && (GFINTCAST (vs_dst->video_buffer + vs_dst->pitch_y)%16 == 0)
+ && (GFINTCAST (vs_dst->video_buffer + vs_dst->pitch_y * vs_dst->height + vs_dst->pitch_y/2)%16 == 0)
+ && (GFINTCAST (pU + src_stride/2)%16 == 0)
+ && (GFINTCAST (pV + src_stride/2)%16 == 0)
+ ) {
+ return gf_color_write_yv12_10_to_yuv_intrin(vs_dst, pY, pU, pV, src_stride, src_width, src_height, _src_wnd);
+ }
+#endif
+
+ /* contiguous layout when no explicit chroma pointers are given */
+ if (!pU) {
+ pU = pY + src_stride * src_height;
+ pV = pY + 5*src_stride * src_height/4;
+ }
+
+ if (_src_wnd) {
+ pY = pY + src_stride * _src_wnd->y + _src_wnd->x;
+ /*because of U and V downsampling by 2x2, working with odd Y offset will lead to a half-line shift between Y and UV components. We
+ therefore force an even Y offset for U and V planes.*/
+ pU = pU + (src_stride * (_src_wnd->y / 2) + _src_wnd->x) / 2;
+ pV = pV + (src_stride * (_src_wnd->y / 2) + _src_wnd->x) / 2;
+ }
+
+ if (vs_dst->pixel_format == GF_PIXEL_YV12) {
+ /* scalar fallback: one 10->8 bit shift per sample */
+ for (i=0; i<h; i++) {
+ u16 *src = (u16 *) (pY + i*src_stride);
+ u8 *dst = (u8 *) vs_dst->video_buffer + i*vs_dst->pitch_y;
+
+ for (j=0; j<w;j++) {
+ *dst = (*src) >> 2;
+ dst++;
+ src++;
+ }
+ }
+
+ for (i=0; i<h/2; i++) {
+ u16 *src = (u16 *) (pU + i*src_stride/2);
+ u8 *dst = (u8 *) vs_dst->video_buffer + vs_dst->pitch_y * vs_dst->height + i*vs_dst->pitch_y/2;
+
+ for (j=0; j<w/2;j++) {
+ *dst = (*src) >> 2;
+ dst++;
+ src++;
+ }
+ }
+
+ for (i=0; i<h/2; i++) {
+ u16 *src = (u16 *) (pV + i*src_stride/2);
+ u8 *dst = (u8 *) vs_dst->video_buffer + 5*vs_dst->pitch_y * vs_dst->height/4 + i*vs_dst->pitch_y/2;
+
+ for (j=0; j<w/2;j++) {
+ *dst = (*src) >> 2;
+ dst++;
+ src++;
+ }
+ }
+ return GF_OK;
+ }
+ return GF_NOT_SUPPORTED;
+}
+
}
-static u8 *gf_dm_get_chunk_data(GF_DownloadSession *sess, u8 *body_start, u32 *payload_size, u32 *header_size)
+static char *gf_dm_get_chunk_data(GF_DownloadSession *sess, char *body_start, u32 *payload_size, u32 *header_size)
{
u32 size;
char *te_header, *sep;
}
- te_header = strstr(body_start, "\r\n");
+ te_header = strstr((char *) body_start, "\r\n");
if (!te_header) return NULL;
te_header[0] = 0;
hdr_size = 0;
remaining = 0;
if (sess->chunked) {
- data = gf_dm_get_chunk_data(sess, payload, &nbBytes, &hdr_size);
+ data = (u8 *) gf_dm_get_chunk_data(sess, (char *) payload, &nbBytes, &hdr_size);
if (hdr_size + nbBytes + 2 > payload_size) {
remaining = nbBytes + 2 - payload_size + hdr_size;
nbBytes = payload_size - hdr_size;
}
if (sess->icy_metaint > 0)
- gf_icy_skip_data(sess, sess->icy_metaint, data, nbBytes);
+ gf_icy_skip_data(sess, sess->icy_metaint, (char *) data, nbBytes);
else {
if (sess->use_cache_file)
- gf_cache_write_to_cache( sess->cache_entry, sess, data, nbBytes);
+ gf_cache_write_to_cache( sess->cache_entry, sess, (char *) data, nbBytes);
par.msg_type = GF_NETIO_DATA_EXCHANGE;
par.error = GF_OK;
- par.data = data;
+ par.data = (char *) data;
par.size = nbBytes;
par.reply = flush_chunk;
gf_dm_sess_user_io(sess, &par);
if (e) return e;
size = *read_size;
*read_size = 0;
- gf_dm_data_received(sess, buffer, size, 0, read_size);
+ gf_dm_data_received(sess, (u8 *) buffer, size, 0, read_size);
return GF_OK;
}
if (e == GF_IP_CONNECTION_CLOSED){
u32 len = gf_cache_get_content_length(sess->cache_entry);
if (size > 0)
- gf_dm_data_received(sess, sHTTP, size, 0, NULL);
+ gf_dm_data_received(sess, (u8 *) sHTTP, size, 0, NULL);
if ( ( (len == 0) && sess->use_cache_file)
/*ivica patch*/
|| (size==0)
gf_dm_sess_notify_state(sess, sess->status, e);
return e;
}
- gf_dm_data_received(sess, sHTTP, size, 0, NULL);
+ gf_dm_data_received(sess, (u8 *) sHTTP, size, 0, NULL);
/*socket empty*/
if (size < GF_DOWNLOAD_BUFFER_SIZE) {
sess->use_cache_file = 0;
}
- GF_LOG(GF_LOG_ERROR, GF_LOG_NETWORK,
- (e ? ("[HTTP] Error connecting to %s: %s\n", sess->server_name, gf_error_to_string(e) )
- : ("[HTTP] Connected to %s\n", sess->server_name )
- ));
+#ifndef GPAC_DISABLE_LOGS
+ if (e) {
+ GF_LOG(GF_LOG_ERROR, GF_LOG_NETWORK, ("[HTTP] Error connecting to %s: %s\n", sess->server_name, gf_error_to_string(e) ) );
+ } else {
+ GF_LOG(GF_LOG_INFO, GF_LOG_NETWORK, ("[HTTP] Connected to %s\n", sess->server_name ) );
+ }
+#endif
/*some servers may reply without content length, but we MUST have it*/
if (e) goto exit;
sess->init_data_size = 0;
sess->init_data = NULL;
- gf_dm_data_received(sess, sHTTP + BodyStart, bytesRead - BodyStart, 1, NULL);
+ gf_dm_data_received(sess, (u8 *) sHTTP + BodyStart, bytesRead - BodyStart, 1, NULL);
}
exit:
if (e) {
#ifdef GPAC_STATIC_MODULES
GF_InterfaceRegister *pr;
+#ifdef GPAC_HAS_FAAD
LOAD_PLUGIN(aac_in);
+#endif
+#ifdef GPAC_HAS_AC3
LOAD_PLUGIN(ac3);
+#endif
#ifdef GPAC_HAS_ALSA
LOAD_PLUGIN(alsa);
#endif
#ifndef GPAC_DISABLE_SVG
LOAD_PLUGIN(laser);
#endif
+#ifdef GPAC_HAS_MAD
LOAD_PLUGIN(mp3_in);
+#endif
LOAD_PLUGIN(mpd_in);
#ifndef GPAC_DISABLE_MEDIA_IMPORT
LOAD_PLUGIN(mpegts_in);
#ifdef GPAC_HAS_WAVEOUT
LOAD_PLUGIN(wave_out);
#endif
+#ifndef GPAC_DISABLE_TTXT
+ LOAD_PLUGIN(vtt_in);
+#endif
#ifndef GPAC_DISABLE_SVG
LOAD_PLUGIN(widgetman);
#endif
#ifdef GPAC_HAS_XVID
LOAD_PLUGIN(xvid);
#endif
-
- LOAD_PLUGIN(ffmpeg);
-
-
-
+
//todo fix project for iOS
#ifdef GPAC_IPHONE
// LOAD_PLUGIN(ios_cam);
#define SLEEP_ABS_SELECT 1
static u32 sys_start_time = 0;
+static u64 sys_start_time_hr = 0;
#endif
gettimeofday(&now, NULL);
return ( (now.tv_sec)*1000 + (now.tv_usec) / 1000) - sys_start_time;
}
+
+GF_EXPORT
+/* High-resolution clock (unix): microseconds elapsed since gf_sys_init
+ * captured sys_start_time_hr. */
+u64 gf_sys_clock_high_res()
+{
+	struct timeval now;
+	gettimeofday(&now, NULL);
+	/*fix: widen tv_sec BEFORE multiplying - on platforms with 32-bit time_t
+	 the product tv_sec*1000000 overflows long before the implicit
+	 conversion to u64 takes place*/
+	return ((u64) now.tv_sec)*1000000 + (now.tv_usec) - sys_start_time_hr;
+}
+
#endif
{
return OS_GetSysClock();
}
+
+
+/* High-resolution clock dispatch pointer: bound at init to either the
+ * QPC-based implementation or the millisecond fallback. Returns microseconds. */
+static u64 (*OS_GetSysClockHR)();
+u64 gf_sys_clock_high_res()
+{
+	return OS_GetSysClockHR();
+}
#endif
return (u32) ((now.QuadPart * 1000) / frequency.QuadPart);
}
+/* Windows high-resolution clock: microseconds elapsed since init, derived
+ * from QueryPerformanceCounter relative to the init_counter/frequency pair
+ * captured at startup. */
+static u64 OS_GetSysClockHIGHRES_FULL()
+{
+	LARGE_INTEGER now;
+	QueryPerformanceCounter(&now);
+	now.QuadPart -= init_counter.QuadPart;
+	/*fix: split into div/mod form - the original QuadPart*1000000 overflows
+	 the signed 64-bit counter after roughly 10 days of uptime with a
+	 10 MHz performance-counter frequency*/
+	return (u64) ( (now.QuadPart / frequency.QuadPart) * 1000000
+	             + (now.QuadPart % frequency.QuadPart) * 1000000 / frequency.QuadPart );
+}
+
static u32 OS_GetSysClockNORMAL()
{
#ifdef _WIN32_WCE
#endif
}
+/* Fallback high-res clock: widen the millisecond timer to the
+ * microsecond contract expected by gf_sys_clock_high_res. */
+static u64 OS_GetSysClockNORMAL_FULL()
+{
+	return 1000 * (u64) OS_GetSysClockNORMAL();
+}
+
#endif /* WIN32 */
#if defined(__sh__)
if (QueryPerformanceFrequency(&frequency)) {
QueryPerformanceCounter(&init_counter);
OS_GetSysClock = OS_GetSysClockHIGHRES;
+ OS_GetSysClockHR = OS_GetSysClockHIGHRES_FULL;
GF_LOG(GF_LOG_INFO, GF_LOG_CORE, ("[core] using WIN32 performance timer\n"));
} else {
OS_GetSysClock = OS_GetSysClockNORMAL;
+ OS_GetSysClockHR = OS_GetSysClockNORMAL_FULL;
GF_LOG(GF_LOG_INFO, GF_LOG_CORE, ("[core] using WIN32 regular timer\n"));
}
#endif
sys_start_time = gf_sys_clock();
+ sys_start_time_hr = gf_sys_clock_high_res();
#endif
GF_LOG(GF_LOG_INFO, GF_LOG_CORE, ("[core] process id %d\n", the_rti.pid));