I'm researching the Web Audio API and HTML5/JavaScript as a learning exercise, and I've been breaking down other people's projects to see how they work, but something is puzzling me. The code below is the JS for audio behind a play/stop button, along with a lowpass filter and a quality slider. How would this be changed to autoplay instead of using the play/stop button?
It's confusing me like crazy.
var QUAL_MUL = 30;

function FilterSample() {
  this.isPlaying = false;
  loadSounds(this, {buffer: '02.mp3'});
};

FilterSample.prototype.play = function() {
  // Create the source.
  var source = context.createBufferSource();
  source.buffer = this.buffer;
  // Create the filter.
  var filter = context.createBiquadFilter();
  filter.type = filter.LOWPASS;
  filter.frequency.value = 5000;
  // Connect source to filter, filter to destination.
  source.connect(filter);
  filter.connect(context.destination);
  // Play!
  source.start(0);
  source.loop = true;
  //!-- THIS DOESN'T WORK source.autoplay = true;
  // Save source and filterNode for later access.
  this.source = source;
  this.filter = filter;
};

// PAUSE Button Function
FilterSample.prototype.stop = function() {
  this.source.stop(0);
};

// Play Button Toggle Function
FilterSample.prototype.toggle = function() {
  this.isPlaying ? this.stop() : this.play();
  this.isPlaying = !this.isPlaying;
};

FilterSample.prototype.changeFrequency = function(element) {
  var minValue = 40;
  var maxValue = context.sampleRate / 2;
  var numberOfOctaves = Math.log(maxValue / minValue) / Math.LN2;
  var multiplier = Math.pow(2, numberOfOctaves * (element.value - 1.0));
  this.filter.frequency.value = maxValue * multiplier;
};

FilterSample.prototype.changeQuality = function(element) {
  this.filter.Q.value = element.value * QUAL_MUL;
};

FilterSample.prototype.toggleFilter = function(element) {
  this.source.disconnect(0);
  this.filter.disconnect(0);
  if (element.checked) {
    this.source.connect(this.filter);
    this.filter.connect(context.destination);
  } else {
    this.source.connect(context.destination);
  }
};
Cheers in advance if you can help me understand this
You need to modify loadSounds to accept a callback function that gets executed once all the sounds have been fetched and decoded.
Then just change your constructor to:
function FilterSample() {
  this.isPlaying = false;
  loadSounds(this, {buffer: '02.mp3'}, function() {
    this.play();
  }.bind(this));
};
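The question doesn't show loadSounds itself, so here is a minimal sketch of what a callback-aware version could look like, assuming it follows the usual XHR + decodeAudioData pattern (the html5rocks BufferLoader style); the names and the lack of error handling are my own simplifications:

// Hypothetical loadSounds: fetch each named buffer, decode it onto `obj`,
// then fire `onComplete` once everything is ready.
function loadSounds(obj, soundMap, onComplete) {
  var names = Object.keys(soundMap);
  var remaining = names.length;
  names.forEach(function(name) {
    var request = new XMLHttpRequest();
    request.open('GET', soundMap[name], true);
    request.responseType = 'arraybuffer';
    request.onload = function() {
      context.decodeAudioData(request.response, function(buffer) {
        obj[name] = buffer;          // e.g. this.buffer, which play() reads
        if (--remaining === 0 && onComplete) onComplete();
      });
    };
    request.send();
  });
}

Two caveats: if you keep the toggle button, also set this.isPlaying = true after the automatic play() so the button state stays in sync, and be aware that current browsers generally block audio that starts before any user gesture, so the autoplay may be silently refused.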
I'm using the Sketch.js plugin in this example. I would like to use my pre-signed URLs as well, but they don't work. The expiration time is set long enough (1 day), so there's something wrong with the JS itself.
I have an S3 bucket where I store some music that is not publicly accessible. Using the official AWS SDK I can generate URLs like:
https://d225******.cloudfront.net/song.m4a?Expires=1493381986&Signature=***&Key-Pair-Id=***
I'm using pre-signed URLs across my website without any problem, but in this script they won't work:
<script>
var ALPHA, AudioAnalyser, COLORS, MP3_PATH, NUM_BANDS, NUM_PARTICLES, Particle, SCALE, SIZE, SMOOTHING, SPEED, SPIN;

MP3_PATH = 'my_presigned_url';

AudioAnalyser = (function() {
  AudioAnalyser.AudioContext = self.AudioContext || self.webkitAudioContext;
  AudioAnalyser.enabled = AudioAnalyser.AudioContext != null;

  function AudioAnalyser(audio, numBands, smoothing) {
    var src;
    this.audio = audio != null ? audio : new Audio();
    this.numBands = numBands != null ? numBands : 256;
    this.smoothing = smoothing != null ? smoothing : 0.3;
    if (typeof this.audio === 'string') {
      src = this.audio;
      this.audio = new Audio();
      this.audio.crossOrigin = "anonymous";
      this.audio.controls = true;
      this.audio.src = src;
    }
    this.context = new AudioAnalyser.AudioContext();
    this.jsNode = this.context.createScriptProcessor(2048, 1, 1);
    this.analyser = this.context.createAnalyser();
    this.analyser.smoothingTimeConstant = this.smoothing;
    this.analyser.fftSize = this.numBands * 2;
    this.bands = new Uint8Array(this.analyser.frequencyBinCount);
    this.audio.addEventListener('canplay', (function(_this) {
      return function() {
        _this.source = _this.context.createMediaElementSource(_this.audio);
        _this.source.connect(_this.analyser);
        _this.analyser.connect(_this.jsNode);
        _this.jsNode.connect(_this.context.destination);
        _this.source.connect(_this.context.destination);
        return _this.jsNode.onaudioprocess = function() {
          _this.analyser.getByteFrequencyData(_this.bands);
          if (!_this.audio.paused) {
            return typeof _this.onUpdate === "function" ? _this.onUpdate(_this.bands) : void 0;
          }
        };
      };
    })(this));
  }

  AudioAnalyser.prototype.start = function() {
    return this.audio.play();
  };

  AudioAnalyser.prototype.stop = function() {
    return this.audio.pause();
  };

  return AudioAnalyser;

})();

Sketch.create({
  particles: [],
  setup: function() {
    var analyser, error, i, intro, j, particle, ref, warning, x, y;
    for (i = j = 0, ref = NUM_PARTICLES - 1; j <= ref; i = j += 1) {
      x = random(this.width);
      y = random(this.height * 2);
      particle = new Particle(x, y);
      particle.energy = random(particle.band / 256);
      this.particles.push(particle);
    }
    if (AudioAnalyser.enabled) {
      try {
        analyser = new AudioAnalyser(MP3_PATH, NUM_BANDS, SMOOTHING);
        analyser.onUpdate = (function(_this) {
          return function(bands) {
            var k, len, ref1, results;
            ref1 = _this.particles;
            results = [];
            for (k = 0, len = ref1.length; k < len; k++) {
              particle = ref1[k];
              results.push(particle.energy = bands[particle.band] / 256);
            }
            return results;
          };
        })(this);
        analyser.start();
        document.getElementById('player-container').appendChild(analyser.audio);
        document.getElementsByTagName("audio")[0].setAttribute("id", "dy_wowaudio");
        intro = document.getElementById('intro');
        intro.style.display = 'none';
      } catch (_error) {
        error = _error;
      }
    }
  }
});
// generated by coffee-script 1.9.2
</script>
The script works fine without a pre-signed URL (as you can see in the example above), so what can I do to use my pre-signed URLs inside the AudioAnalyser function?
I've seen HTML5 video tags make multiple requests, presumably to fetch metadata such as the play length and a first frame to use as a thumbnail. You might try playing with the preload attribute to prevent this.
Specifically, if the clip is small, preload="auto" might be all you need. If the browser has to make follow-up requests you're going to have a hard time, I think. Here's some relevant info
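As a minimal sketch, assuming you create the Audio element yourself and pass it to the analyser (rather than passing the URL string, so you control its attributes), the hint would look like this; preload is only advisory and browsers may ignore it:

var audio = new Audio();
audio.preload = 'auto';            // ask the browser to fetch the whole file up front
audio.crossOrigin = 'anonymous';   // keep CORS mode consistent with the analyser
audio.src = MP3_PATH;              // the pre-signed URL
// Then hand the element to the analyser instead of the raw URL string:
// var analyser = new AudioAnalyser(audio, NUM_BANDS, SMOOTHING);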
Another way to go about this, which I think may work more reliably, is to generate temporary credentials as needed.
See the docs for more on this:
- Requesting temporary credentials
- Accessing resources with temporary credentials
Combine that with a JS package for signing AWS requests, such as binoculars/aws-sigv4, or copy someone else who is already doing this in-browser.
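For illustration only, here is a rough sketch of that flow using the AWS SDK for JavaScript v2, assuming it runs somewhere that already holds IAM credentials (typically your backend, not the page itself); the bucket and key names are placeholders:

var AWS = require('aws-sdk');
var sts = new AWS.STS();

// 1. Ask STS for short-lived credentials.
sts.getSessionToken({ DurationSeconds: 900 }, function (err, data) {
  if (err) { return console.error(err); }

  // 2. Sign a GET URL for the object with those temporary credentials.
  var s3 = new AWS.S3({
    accessKeyId: data.Credentials.AccessKeyId,
    secretAccessKey: data.Credentials.SecretAccessKey,
    sessionToken: data.Credentials.SessionToken
  });
  var url = s3.getSignedUrl('getObject', {
    Bucket: 'my-music-bucket',   // placeholder
    Key: 'song.m4a',             // placeholder
    Expires: 900
  });

  // 3. Hand `url` to the page and use it as MP3_PATH.
  console.log(url);
});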
Share your browser error message, if there is one. It may be a problem related to cross-origin access to the S3 bucket data.
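If it does turn out to be CORS, the bucket needs a rule that allows GET from your page's origin. A hedged sketch with the AWS SDK for JavaScript v2 follows (the bucket name and origin are placeholders; the equivalent rule can also be entered in the S3 console):

var AWS = require('aws-sdk');
var s3 = new AWS.S3();

s3.putBucketCors({
  Bucket: 'my-music-bucket',                   // placeholder
  CORSConfiguration: {
    CORSRules: [{
      AllowedOrigins: ['https://example.com'], // your site's origin
      AllowedMethods: ['GET', 'HEAD'],
      AllowedHeaders: ['*'],
      MaxAgeSeconds: 3000
    }]
  }
}, function (err) {
  if (err) { console.error(err); }
});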
I have an HTML5 video element and I need to apply different processing to the video's output audio in real time. On desktop I made it work with the Web Audio API. The API is seemingly present on iOS as well: I am able to inspect the created objects, but it doesn't modify the video's output signal.
Here's my example code:
$(function () {
  window.AudioContext = window.AudioContext || window.webkitAudioContext;
  var audioContext = new AudioContext();
  var bufferSize = 1024;
  var selectedChannel = 0;

  var effect = (function() {
    var node = audioContext.createScriptProcessor(bufferSize, 2, 2);
    node.addEventListener('audioprocess', function(e) {
      var input = e.inputBuffer.getChannelData(selectedChannel);
      var outputL = e.outputBuffer.getChannelData(0);
      var outputR = e.outputBuffer.getChannelData(1);
      for (var i = 0; i < bufferSize; i++) {
        outputL[i] = selectedChannel == 0 ? input[i] : 0.0;
        outputR[i] = selectedChannel == 1 ? input[i] : 0.0;
      }
    });
    return node;
  })();

  var streamAttached = false;

  function attachStream(video) {
    if (streamAttached) {
      return;
    }
    var source = audioContext.createMediaElementSource(video);
    source.connect(effect);
    effect.connect(audioContext.destination);
    streamAttached = true;
  }

  function iOS_video_touch_start() {
    var video = $('#vid')[0];
    video.play();
    attachStream(video);
  }

  var needtouch = false;

  $('#vid').on('play', function () {
    attachStream(this);
  }).on('loadedmetadata', function () {
    this.play();
    this.volume = 1.0;
    if (this && this.paused) {
      if (needtouch == false) {
        needtouch = true;
        this.addEventListener("touchstart", iOS_video_touch_start, true);
      }
    }
  });

  window.panToRight = function(){
    selectedChannel = 1;
  };

  window.panToLeft = function(){
    selectedChannel = 0;
  };
});
You can also check it on CodePen:
http://codepen.io/anon/pen/pgeJQG
With the buttons you are able to toggle between the left and the right channels. On desktop browsers (Chrome, Firefox, Safari tested) it works fine.
I have also tried the older createJavaScriptNode() instead of createScriptProcessor(). I have also tried an alternative effect chain, which looked like this:
var audioContext = new (window.AudioContext||window.webkitAudioContext)();
audioContext.createGain = audioContext.createGain||audioContext.createGainNode;
var gainL = audioContext.createGain();
var gainR = audioContext.createGain();
gainL.gain.value = 1;
gainR.gain.value = 1;
var merger = audioContext.createChannelMerger(2);
var splitter = audioContext.createChannelSplitter(2);
//Connect to source
source = audioContext.createMediaElementSource(video);
//Connect the source to the splitter
source.connect(splitter, 0, 0);
//Connect the splitter's outputs to the Gain nodes
splitter.connect(gainL, 0);
splitter.connect(gainR, 1);
//Connect Left and Right Nodes to the Merger Node inputs
//Assuming stereo as initial status
gainL.connect(merger, 0, 0);
gainL.connect(merger, 0, 1);
//Connect Merger output to context destination
merger.connect(audioContext.destination, 0, 0);
As you probably noticed, this code uses only the built-in nodes. But no luck.
So my questions are: Is this even possible on mobile? If it is, then what am I missing? If it is not, then is there any possible workaround? Thanks
With Chrome on Android, MediaElementSource is currently not routed to Web Audio. This is a known issue and is planned to be fixed eventually.
I am making an audio recorder using HTML5 and JavaScript, and I do not want to include any third-party API. I reached my first step by creating an audio retriever and player using the <audio> tag and the navigator.webkitGetUserMedia function, which gets audio from my microphone and plays it through the <audio> element. But I am not able to get the audio data into an array; at this point I don't know what to do or which function to use.
Simple: just create an audio node. Below is tweaked code from MattDiamond's RecorderJS:
function RecordAudio(stream, cfg) {
  var config = cfg || {};
  var bufferLen = config.bufferLen || 4096;
  var numChannels = config.numChannels || 2;
  this.context = stream.context;
  var recordBuffers = [];
  var recording = false;
  this.node = (this.context.createScriptProcessor ||
               this.context.createJavaScriptNode).call(this.context,
               bufferLen, numChannels, numChannels);
  stream.connect(this.node);
  this.node.connect(this.context.destination);
  this.node.onaudioprocess = function(e) {
    if (!recording) return;
    for (var i = 0; i < numChannels; i++) {
      if (!recordBuffers[i]) recordBuffers[i] = [];
      recordBuffers[i].push.apply(recordBuffers[i], e.inputBuffer.getChannelData(i));
    }
  };
  this.getData = function() {
    var tmp = recordBuffers;
    recordBuffers = [];
    return tmp; // returns an array of arrays containing data from the various channels
  };
  this.start = function() {   // was `this.start() = ...`, which is a syntax error
    recording = true;
  };
  this.stop = function() {    // was `this.stop() = ...`, which is a syntax error
    recording = false;
  };
}
example usage:
var recorder = new RecordAudio(userMedia);
recorder.start();
recorder.stop();
var recordedData = recorder.getData();
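The usage above assumes a userMedia node already exists. Here is a hedged sketch of how you might obtain one from the microphone, assuming a browser with standard getUserMedia support (the question's prefixed navigator.webkitGetUserMedia works the same way, just with callbacks instead of a promise):

var AudioCtx = window.AudioContext || window.webkitAudioContext;
var context = new AudioCtx();

navigator.mediaDevices.getUserMedia({ audio: true }).then(function (stream) {
  // RecordAudio expects an AudioNode (it reads stream.context and calls
  // stream.connect(...)), so wrap the MediaStream in a source node first.
  var userMedia = context.createMediaStreamSource(stream);
  var recorder = new RecordAudio(userMedia);
  recorder.start();

  // Later, e.g. from a stop button:
  // recorder.stop();
  // var recordedData = recorder.getData(); // one array of samples per channel
}).catch(function (err) {
  console.error('Microphone access failed:', err);
});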
I have a webcam streaming app based on the webcam.fla example by Wowza. The app streams audio and video from Flash to a Wowza server where it's transcoded etc.
We're trying to add a feature that lets the audio source be changed to any other system audio source. So far we can successfully create a dropdown containing all the audio interfaces and handle the change event, but despite stopping and restarting the stream with the doConnect() function, the audio source seems to remain the default.
import flash.media.*;
import flash.geom.*;
import flash.net.*;
import flash.media.*;// Should this be duplicated
var parsed:Object = root.loaderInfo.parameters;
var nc:NetConnection = null;
var nsPublish:NetStream = null;
var nsPlay:NetStream = null;
var camera:Camera = null;
var microphone:Microphone = null;
// Testing
var serverName:String = "rtmp://stream-na.example.tv:1935/live";
var movieName:String = "streamName";
var flushVideoBufferTimer:Number = 0;
// Quality settings
var videoBitrate:Number = 200000;
var videoQuality:Number = 80; // Quality %
var videoWidth:Number = 640;
var videoHeight:Number = 360;
var videoFrameRate:Number = 30;
//////////////// UI Functions Below
import fl.controls.ComboBox;
import fl.data.DataProvider;
var aCb:ComboBox = new ComboBox();
function createAudioComboBox(sources)
{
var sourcesArray:Array = new Array();
aCb.dropdownWidth = 210;
aCb.width = 200;
aCb.move(0, 365);
aCb.prompt = "Change Audio Source";
aCb.dataProvider = new DataProvider(sourcesArray);
aCb.addEventListener(Event.CHANGE, changeAudioHandler);
addChild(aCb);
for (var index in sources)
{
//ExternalInterface.call("logBrowserStreaming", sources[index]);
aCb.addItem( { label: sources[index], data: index} );
}
function changeAudioHandler(event:Event):void
{
doConnect();
//var request:URLRequest = new URLRequest();
//request.url = ComboBox(event.target).selectedItem.data;
//navigateToURL(request);
//aCb.selectedIndex = -1;
var audioSource = ComboBox(event.target).selectedItem.data;
//microphone:Microphone = null;
microphone = Microphone.getMicrophone(audioSource);
microphone.rate = 16;
microphone.codec = SoundCodec.SPEEX;
microphone.encodeQuality = 10; // This is shit!! offer better audio in native app?
microphone.setSilenceLevel(0, -1);
microphone.setUseEchoSuppression(true);
//ExternalInterface.call("logBrowserStreaming", audioSource);
// Trigger restart camera...
//startCamera(); // Nope
doConnect();
}
}
//////////////// Core Streaming Functions Below
function startCamera()
{
// get the default Flash camera and microphone
camera = Camera.getCamera();
microphone = Microphone.getMicrophone();
// here are all the quality and performance settings
// here are all the quality and performance settings
if (camera != null)
{
//camera.setMode(1280, 720, 30, false);
camera.setMode(videoWidth, videoHeight, videoFrameRate, false); // false gives framerate priority apparently?? http://www.flash-communications.net/technotes/setMode/index.html
camera.setQuality(videoBitrate, videoQuality);
// Max 800kbps;
camera.setKeyFrameInterval(2);
// List audio sources names
// sourceVideoLabel.text += Camera.names;
// Create audio sources dropdown
// Hide video sources for now...
//createVideoComboBox(Camera.names);
}
else
{
sourceVideoLabel.text = "No Camera Found\n";
}
if ( microphone != null)
{
microphone.rate = 16;
microphone.codec = SoundCodec.SPEEX;
microphone.encodeQuality = 10; // This is shit!! offer better audio in native app?
microphone.setSilenceLevel(0, -1);
microphone.setUseEchoSuppression(true);
// List audio sources names;
// sourceVideoLabel.text += Microphone.names;
// Create audio sources dropdown
createAudioComboBox(Microphone.names);
// Don't show audio slider for now...
// createAudioSlider();
// Don't monitor audio level for now...
//monitorAudioLevel();
}
else
{
sourceVideoLabel.text += "No Microphone Found\n";
}
nameStr.text = movieName;
AppendCheckbox.selected = false;
connect.connectStr.text = serverName;
connect.connectButton.addEventListener(MouseEvent.CLICK, doConnect);
//enablePlayControls(false);
doConnect();
}
function ncOnStatus(infoObject:NetStatusEvent)
{
trace("nc: "+infoObject.info.code+" ("+infoObject.info.description+")");
if (infoObject.info.code == "NetConnection.Connect.Failed")
{
prompt.text = "Connection failed. Try again or email support#chew.tv";
}
else if (infoObject.info.code == "NetConnection.Connect.Rejected")
{
// Hide connect fail...
prompt.text = infoObject.info.description;
}
}
// Ask for permission to use the camera and show the preview to the user
// event:MouseEvent
// doConnect toggles connections on and off.
function doConnect()
{
// connect to the Wowza Media Server
if (nc == null)
{
// create a connection to the wowza media server
nc = new NetConnection();
nc.addEventListener(NetStatusEvent.NET_STATUS, ncOnStatus);
nc.connect(connect.connectStr.text);
//connect.connectButton.label = "Disconnect";
// uncomment this to monitor frame rate and buffer length
//setInterval("updateStreamValues", 500);
// Attach camera to preview
videoCamera.clear();
videoCamera.attachCamera(camera);
//enablePlayControls(true);
// Pass status to
// ExternalInterface.call("logBrowserStreaming", "cameraagreed");
}
else
{
nsPublish = null;
nsPlay = null;
videoCamera.attachNetStream(null);
videoCamera.clear();
videoRemote.attachNetStream(null);
videoRemote.clear();
nc.close();
nc = null;
//enablePlayControls(false);
doSubscribe.label = 'Play';
doPublish.label = 'Stream';
AppendCheckbox.selected = false;
connect.connectButton.label = "Connect";
prompt.text = "";
}
}
// function to monitor the frame rate and buffer length
function updateStreamValues()
{
if (nsPlay != null)
{
fpsText.text = (Math.round(nsPlay.currentFPS*1000)/1000)+" fps";
bufferLenText.text = (Math.round(nsPlay.bufferLength*1000)/1000)+" secs";
}
else
{
fpsText.text = "";
bufferLenText.text = "";
}
}
function nsPlayOnStatus(infoObject:NetStatusEvent)
{
trace("nsPlay: onStatus: "+infoObject.info.code+" ("+infoObject.info.description+")");
if (infoObject.info.code == "NetStream.Play.StreamNotFound" || infoObject.info.code == "NetStream.Play.Failed")
{
prompt.text = infoObject.info.description;
}
}
function doCloseRecord()
{
// after we have hit "Stop" recording and after the buffered video data has been
// sent to the Wowza Media Server close the publishing stream
nsPublish.publish("null");
}
// this function gets called every 250 ms to monitor the;
// progress of flushing the video buffer. Once the video
// buffer is empty we close publishing stream
function flushVideoBuffer()
{
var buffLen:Number = nsPublish.bufferLength;
if (buffLen == 0)
{
clearInterval(flushVideoBufferTimer);
flushVideoBufferTimer = 0;
doCloseRecord();
doPublish.label = 'Stream';
}
}
function nsPublicOnStatus(infoObject:NetStatusEvent)
{
trace("nsPublish: "+infoObject.info.code+" ("+infoObject.info.description+")");
// After calling nsPublish.publish(false); we wait for a status;
// event of "NetStream.Unpublish.Success" which tells us all the video
// and audio data has been written to the flv file. It is at this time
// that we can start playing the video we just recorded.
if (infoObject.info.code == "NetStream.Unpublish.Success")
{
//doPlayStart();
}
if (infoObject.info.code == "NetStream.Play.StreamNotFound" || infoObject.info.code == "NetStream.Play.Failed")
{
prompt.text = infoObject.info.description;
}
}
function initH264Recording(nsPublish:NetStream)
{
var h264Settings:H264VideoStreamSettings = new H264VideoStreamSettings();
h264Settings.setProfileLevel(H264Profile.BASELINE, H264Level.LEVEL_3);
nsPublish.videoStreamSettings = h264Settings;
}
// Start recording video to the server
function doStreamStart()
{
//prompt.text = "Starting stream with mic...";
//prompt.text = microphone;
ExternalInterface.call("logBrowserStreaming", "starting stream");
// stop video playback
//doPlayStop();
// create a new NetStream object for publishing
nsPublish = new NetStream(nc);
var nsPublishClient:Object = new Object();
nsPublish.client = nsPublishClient;
// Set the H.264 encoding parameters
if (testVersion(11,0,0,0))
{
initH264Recording(nsPublish);
}
else
{
prompt.text = "Flash player 11 or greater is required for H.264 encoding (" + Capabilities.version + ").";
}// trace the NetStream status information
nsPublish.addEventListener(NetStatusEvent.NET_STATUS, nsPublicOnStatus);
// publish the stream by name;
nsPublish.publish(nameStr.text, (AppendCheckbox.selected?"append":"record"));
// add custom metadata to the header of the .flv file;
var metaData:Object = new Object();
metaData["description"] = "Recorded using WebcamRecording example.";
nsPublish.send("#setDataFrame", "onMetaData", metaData);
// attach the camera and microphone to the server;
nsPublish.attachCamera(camera);
nsPublish.attachAudio(microphone);
ExternalInterface.call("logBrowserStreaming", microphone);
// set the buffer time to 20 seconds to buffer 20 seconds of video;
// data for better performance and higher quality video
nsPublish.bufferTime = 20;
// Disable the audio choice dropdown
aCb.enabled = false;
}
function doStreamStop()
{
ExternalInterface.call("logBrowserStreaming", "stopping stream");
// stop streaming video and audio to the publishing
// NetStream object
nsPublish.attachAudio(null);
nsPublish.attachCamera(null);
// After stopping the publishing we need to check if there is;
// video content in the NetStream buffer. If there is data
// we are going to monitor the video upload progress by calling
// flushVideoBuffer every 250ms. If the buffer length is 0
// we close the recording immediately.
var buffLen:Number = nsPublish.bufferLength;
if (buffLen > 0)
{
flushVideoBufferTimer = setInterval(flushVideoBuffer,250);
doPublish.label = 'Wait...';
}
else
{
trace("nsPublish.publish(null)");
doCloseRecord();
doPublish.label = 'Start';
}
// Disable the audio choice dropdown
aCb.enabled = true;
}
// Test version function checks if the current flash version supports H.264 Encoding.
function testVersion(v0:Number, v1:Number, v2:Number, v3:Number):Boolean
{
var version:String = Capabilities.version;
var index:Number = version.indexOf(" ");
version = version.substr(index+1);
var verParts:Array = version.split(",");
var i:Number;
var ret:Boolean = true;
while (true)
{
if (Number(verParts[0]) < v0)
{
ret = false;
break;
}
else if (Number(verParts[0]) > v0)
{
break;
}
if (Number(verParts[1]) < v1)
{
ret = false;
break;
}
else if (Number(verParts[1]) > v1)
{
break;
}
if (Number(verParts[2]) < v2)
{
ret = false;
break;
}
else if (Number(verParts[2]) > v2)
{
break;
}
if (Number(verParts[3]) < v3)
{
ret = false;
break;
}
break;
}
trace("testVersion: "+Capabilities.version+">="+v0+","+v1+","+v2+","+v3+": "+ret);
return ret;
}
// External trigger from Javascript;
// Allow stream to start with startBrowserStreaming call from js
ExternalInterface.addCallback("startBrowserStreaming", doStreamStart);
// Allow stream to stop with stopBrowserStreaming call from js;
ExternalInterface.addCallback("stopBrowserStreaming", doStreamStop);
stage.align = "TL";
stage.scaleMode = "noScale";
startCamera();
You can switch your audio source without touching the NetConnection and/or the NetStream.
Take this simple example, where I used a button to change my audio source :
const server:String = 'rtmp://localhost/live';
const stream:String = 'live';

var nc:NetConnection;
var ns_publish:NetStream;

nc = new NetConnection();
nc.addEventListener(
    NetStatusEvent.NET_STATUS,
    function(e:NetStatusEvent):void {
        if (e.info.code == 'NetConnection.Connect.Success') {
            publish();
        }
    }
);
nc.addEventListener(AsyncErrorEvent.ASYNC_ERROR, function(e:AsyncErrorEvent):void {});
nc.connect(server);

function publish():void {
    var cam:Camera = Camera.getCamera();
    // in my case, I have 2 mics, and I start with the first
    var mic:Microphone = Microphone.getMicrophone(0);
    ns_publish = new NetStream(nc);
    ns_publish.attachAudio(mic);
    ns_publish.attachCamera(cam);
    ns_publish.publish(stream, 'record');
}

btn_switch_mic.addEventListener(MouseEvent.CLICK, function(e){
    // I can switch to the second mic without reinitializing my NetConnection and/or my NetStream
    var mic:Microphone = Microphone.getMicrophone(1);
    ns_publish.attachAudio(mic);
});
I tested this code with Wowza Streaming Engine 4.1.1 (the free version, without the Wowza Transcoder AddOn, of course) and Flash Media Server 4.5, and it's working fine.
Note: you can use the same approach to change the video source (Camera).
Hope that helps.
I have a question about adding a specific animation to some XML data I'm pulling. I need some advice on how to make the data move from left to right or vice versa. Just as an example, I downloaded an RSS feed from BBC World News, so it's just an XML file. Both the Flash file and the XML are saved in the same folder, and in Flash I can load and display the data. Here's the code so far:
import flash.text.TextField;
import flash.sampler.StackFrame;
import flash.display.MovieClip;

var yPlacement:int = 20;
var xPlacement:int = 30;
var distance:int = 60;

var loader:URLLoader = new URLLoader();
loader.load(new URLRequest("bbc-worldnews-rss.xml"));
loader.addEventListener(Event.COMPLETE, handleComplete);

function handleComplete(event:Event):void
{
    var rawXML:XML = new XML(loader.data);
    rawXML.ignoreWhite = true;
    //trace(rawXML.channel.image.url);
    var items:XMLList = rawXML.channel.item;
    //trace("Total new items", items.length());
    for each (var item:XML in items)
    {
        //trace(item.title);
        var feedTitle:String = item.title.toString();
        var myText:TextField = new TextField();
        myText.text = feedTitle;
        myText.autoSize = TextFieldAutoSize.LEFT;
        myText.x = 2;
        myText.y = 2;
        var clip_mc = new MovieClip();
        clip_mc.addChild(myText);
        addChild(clip_mc);
        clip_mc.y = yPlacement;
        clip_mc.x = xPlacement;
        yPlacement = yPlacement + distance;
    }
    //trace("First item title:", item[0].title);
}
I also know the code that makes text move from side to side, but I don't know how to incorporate it into my code above:
onClipEvent (load) {
    startPoint = 1280; //this is where the clip will start
    endPoint = -1080; //this is where the clip will end, and restart to the startPoint.
    speed = 5; //this is how many pixels the text will move each frame.
}
onClipEvent (enterFrame) {
    this._x -= speed; //you are telling the MC to move to the left 5 pixels each frame.
    if (this._x <= endPoint) { //if your clip goes beyond the end point.
        this._x = startPoint; //go back to the starting point.
    }
}
I hope I'm not confusing anyone; I just need the data I get from the XML file to move side to side. I may be completely off course, but I would GREATLY appreciate anyone's help!
Thank you,
For starters, the second snippet of code you posted is actually ActionScript 2, not 3.
You'd need to update that snippet to AS3 in order for this to work. Try something like this:
var startPoint:int = 1280;
var endPoint:int = -1080;
var speed:int = 5;

function moveMC(mc:MovieClip):void {
    mc.addEventListener(Event.ENTER_FRAME, tick);
}

function tick(e:Event):void {
    e.currentTarget.x -= speed;
    if (e.currentTarget.x <= endPoint) {
        e.currentTarget.x = startPoint;
    }
}
You could then call moveMC() after you've added your newly created MovieClip to the stage.
Edit: You can use that snippet right in your for each loop like this:
for each (var item:XML in items)
{
    //trace(item.title);
    var feedTitle:String = item.title.toString();
    var myText:TextField = new TextField();
    myText.text = feedTitle;
    myText.autoSize = TextFieldAutoSize.LEFT;
    myText.x = 2;
    myText.y = 2;
    var clip_mc = new MovieClip();
    clip_mc.addChild(myText);
    addChild(clip_mc);
    clip_mc.y = yPlacement;
    clip_mc.x = xPlacement;
    yPlacement = yPlacement + distance;
    //takes in reference to MovieClip, start point, end point and speed
    moveMC(clip_mc, 1280, -1080, 5);
}

function moveMC(mc:MovieClip, startPoint:int, endPoint:int, speed:int):void {
    mc.startPoint = startPoint;
    mc.endPoint = endPoint;
    mc.speed = speed;
    mc.addEventListener(Event.ENTER_FRAME, tick);
}

function tick(e:Event):void {
    var mc:MovieClip = e.currentTarget as MovieClip;
    mc.x -= mc.speed;
    if (mc.x <= mc.endPoint) {
        mc.x = mc.startPoint;
    }
}