audio/AudioInterfacePCM.java

00001 
00002 package audio;
00003 
00004 import java.io.IOException;
00005 
00006 import javax.sound.sampled.AudioFormat;
00007 import javax.sound.sampled.AudioSystem;
00008 import javax.sound.sampled.DataLine;
00009 import javax.sound.sampled.LineUnavailableException;
00010 import javax.sound.sampled.Mixer;
00011 import javax.sound.sampled.SourceDataLine;
00012 import javax.sound.sampled.TargetDataLine;
00013 
00014 import protocol.VoicePDU;
00015 
00016 import utils.Log;
00017 import utils.OctetBuffer;
00018 
00019 /**
00020  *  Implements the audio interface for 16-bit signed linear audio (PCM_SIGNED).
00021  *  It also provides support for CODECs that can convert to and from Signed LIN16.
00022  *
00023  *  @author Mikica B Kocic, based on <em>hevaily</em> modified 
00024  *          and re-documented Tim Panton's (thp@westhawk.co.uk) code 
00025  *          from org.asteriskjava.iax.audio.javasound.Audio8k
00026  */
00027 public class AudioInterfacePCM implements AudioInterface 
00028 {
00029     //////////////////////////////////////////////////////////////////////////////////////
00030     /* Constants
00031      */
00032     /** Audio buffering depth in number of frames */
00033     private static final int FRAME_COUNT = 10;
00034 
00035     /** Low-level water mark used for de-jittering */
00036     private static final int LLBS = 6;
00037 
00038     /** Frame interval in milliseconds */
00039     private static final int FRAME_INTERVAL = 20;
00040 
00041     //////////////////////////////////////////////////////////////////////////////////////
00042     /* Properties
00043      */
00044     /** Stereo recording */
00045     private boolean propertyStereoRec = false;
00046     
00047     /** Big buffers */
00048     private boolean propertyBigBuff = false;
00049     
00050     /** Input device name */
00051     private String propertyInputDeviceName = null;
00052     
00053     /** Output device name */
00054     private String propertyOutputDeviceName = null;
00055     
00056     //////////////////////////////////////////////////////////////////////////////////////
00057     /* Common file formats
00058      */
00059     private AudioFormat mono8k;
00060     private AudioFormat stereo8k;
00061     private AudioFormat mono44k;
00062 
00063     //////////////////////////////////////////////////////////////////////////////////////
00064     /* Audio Input (audio recorder interface from microphone)
00065      */
00066     private TargetDataLine targetDataLine = null;
00067     private volatile Thread audioSenderThread = null;
00068     private volatile Thread micRecorderThread = null;
00069     private volatile Packetizer audioSender = null;
00070     
00071     /* Audio input buffer (between microphone recorder and audio sender)
00072      */
00073     private AudioBuffer[] recordBuffer = new AudioBuffer[ FRAME_COUNT ];
00074     private int micBufPut = 0;
00075     private int micBufGet = 0;
00076     private long lastMicTimestamp = 0;
00077     
00078     //////////////////////////////////////////////////////////////////////////////////////
00079     /* Audio Output (audio player interface to speaker)
00080      */
00081     private SourceDataLine sourceDataLine = null;
00082     private volatile Thread audioPlayerThread = null;
00083 
00084     /* Dejitter buffer (between UDP and audio output)
00085      */
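          // The play buffer holds 2 * FRAME_COUNT = 20 frames of 20 ms each (400 ms):
          // twice the usable depth, so recently played frames remain available as history.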
00086     private AudioBuffer[] playBuffer = new AudioBuffer[ FRAME_COUNT + FRAME_COUNT ];
00087     private int jitBufPut = 0; // incoming (enqueueing) packets here
00088     private int jitBufGet = 0; // outgoing packets (to audio output) from here
00089     private long jitBufFudge = 0; // total sample skew
00090     private boolean jitBufFirst = true; // true until the first packet has been played
00091     private boolean playerIsEnabled = false; // can write or not to audio output
00092     private long deltaTimePlayerMinusMic = 0; // used to calculate skew
00093 
00094     /** Measured call length in milliseconds */
00095     private long callLength = 0;
00096 
00097     /* Ringer tone generator (does not use dejitter buffer; writes directly
00098      * to audio output).
00099      */
00100     private volatile Thread ringerThread = null;
00101     private byte[] ringSamples = null;
00102     private byte[] silenceSamples = null;
00103     
00104     private boolean providingRingBack = false;
00105     private long ringTimer = -1;
00106 
00107     //////////////////////////////////////////////////////////////////////////////////////
00108     
00109     /**
00110      * Constructor for the AudioInterfacePCM object
00111      */
00112     public AudioInterfacePCM () 
00113     {
00114         this.mono8k = new AudioFormat(
00115                             AudioFormat.Encoding.PCM_SIGNED,
00116                             8000f, 16, 1, 2, 8000f, true );
00117         
00118         this.stereo8k = new AudioFormat(
00119                             AudioFormat.Encoding.PCM_SIGNED,
00120                             8000f, 16, 2, 4, 8000f, true );
00121 
00122         this.mono44k =  new AudioFormat(
00123                             AudioFormat.Encoding.PCM_SIGNED,
00124                             44100f, 16, 1, 2, 44100f, true );
00125 
00126         initializeRingerSamples ();
00127 
00128         //////////////////////////////////////////////////////////////////////////////////
00129         
00130         /*  Initializes source and target data lines used by the instance.
00131          */
00132         getAudioIn ();
00133         getAudioOut ();
00134 
00135         //////////////////////////////////////////////////////////////////////////////////
00136         
00137         if ( this.targetDataLine != null ) 
00138         {
00139             Runnable thread = new Runnable() {
00140                 public void run () {
00141                     pduSenderWorker ();
00142                 }
00143             };
00144 
00145             this.audioSenderThread = new Thread( thread, "Tick-send" );
00146             this.audioSenderThread.setDaemon( true );
00147             this.audioSenderThread.setPriority( Thread.MAX_PRIORITY - 1 );
00148 
00149             this.audioSenderThread.start();
00150         }
00151 
00152         //////////////////////////////////////////////////////////////////////////////////
00153         
00154         if ( this.sourceDataLine != null ) 
00155         {
00156             Runnable thread = new Runnable () {
00157                 public void run () {
00158                     audioPlayerWorker ();
00159                 }
00160             };
00161 
00162             this.audioPlayerThread = new Thread( thread, "Tick-play" );
00163             this.audioPlayerThread.setDaemon( true );
00164             this.audioPlayerThread.setPriority( Thread.MAX_PRIORITY );
00165 
00166             this.audioPlayerThread.start();
00167         }
00168 
00169         //////////////////////////////////////////////////////////////////////////////////
00170         
00171         if ( this.sourceDataLine != null ) 
00172         {
00173             Runnable thread = new Runnable () {
00174                 public void run () {
00175                     ringerWorker ();
00176                 }
00177             };
00178     
00179             this.ringerThread = new Thread( thread, "Ringer" );
00180             this.ringerThread.setDaemon( true );
00181             this.ringerThread.setPriority( Thread.MIN_PRIORITY );
00182 
00183             this.ringerThread.start ();
00184         }
00185 
00186         //////////////////////////////////////////////////////////////////////////////////
00187 
00188         if ( this.sourceDataLine == null ) {
00189             Log.attn( "Failed to open audio output device (speaker or similar)" );
00190         }
00191 
00192         if ( this.targetDataLine == null ) {
00193             Log.attn( "Failed to open audio capture device (microphone or similar)" );
00194         }
00195         
00196         Log.trace( "Created 8kHz 16-bit PCM audio interface; Sample size = " 
00197                 + this.getSampleSize () + " octets" );
00198     }
00199     
00200     //////////////////////////////////////////////////////////////////////////////////////
00201     
00202     /**
00203      *  Returns the preferred minimum sample size for use in creating buffers etc.
00204      */
00205     @Override
00206     public int getSampleSize () 
00207     {
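              /* For the mono 8 kHz LIN16 format this works out to
               * 8000 frames/s * 2 octets/frame * 20 ms / 1000 = 320 octets per frame.
               */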
00208         AudioFormat af = this.mono8k;
00209         return (int) ( af.getFrameRate() * af.getFrameSize() * FRAME_INTERVAL / 1000.0 );
00210     }
00211 
00212     /**
00213      *  Returns our VoicePDU format
00214      */
00215     @Override
00216     public int getVoicePduSubclass () 
00217     {
00218         return protocol.VoicePDU.LIN16;
00219     }
00220 
00221     /**
00222      * Sets the active audio sender for the recorder
00223      */
00224     @Override
00225     public void setAudioSender( AudioInterface.Packetizer as ) 
00226     {
00227         this.audioSender = as;
00228     }
00229 
00230     //////////////////////////////////////////////////////////////////////////////////////
00231 
00232     /**
00233      *  Stops threads and cleans up the instance.
00234      */
00235     @Override
00236     public void cleanUp ()
00237     {
00238         /* Signal all worker threads to quit
00239          */
00240         Thread spkout  = this.audioPlayerThread;
00241         Thread ringer  = this.ringerThread;
00242         Thread micin   = this.micRecorderThread;
00243         Thread sender  = this.audioSenderThread;
00244 
00245         this.audioPlayerThread = null;
00246         this.ringerThread      = null;
00247         this.micRecorderThread = null;
00248         this.audioSenderThread   = null;
00249 
00250         /* Wait for worker threads to complete
00251          */
00252         if ( spkout != null ) {
00253             try {
00254                 spkout.interrupt ();
00255                 spkout.join ();
00256             } catch ( InterruptedException e ) {
00257                 /* ignored */
00258             }
00259         }
00260 
00261         if ( ringer != null ) {
00262             try {
00263                 ringer.interrupt ();
00264                 ringer.join ();
00265             } catch ( InterruptedException e ) {
00266                 /* ignored */
00267             }
00268         }
00269 
00270         if ( micin != null ) {
00271             try {
00272                 micin.interrupt ();
00273                 micin.join ();
00274             } catch ( InterruptedException e ) {
00275                 /* ignored */
00276             }
00277         }
00278         
00279         if ( sender != null ) {
00280             try {
00281                 sender.interrupt ();
00282                 sender.join ();
00283             } catch ( InterruptedException e ) {
00284                 /* ignored */
00285             }
00286         }
00287 
00288         /* Be nice and close audio data lines
00289          */
00290         if ( this.sourceDataLine != null )
00291         {
00292             this.sourceDataLine.close ();
00293             this.sourceDataLine = null;
00294         }
00295         
00296         if ( this.targetDataLine != null )
00297         {
00298             this.targetDataLine.close ();
00299             this.targetDataLine = null;
00300         }
00301     }
00302 
00303     //////////////////////////////////////////////////////////////////////////////////////
00304     
00305     /**
00306      *  Writes frames to the audio output, i.e. the source data line
00307      */
00308     private void audioPlayerWorker () 
00309     {
00310         Log.trace( "Thread started" );
00311         
00312         while( this.audioPlayerThread != null ) 
00313         {
00314             if ( this.sourceDataLine == null ) {
00315                 break;
00316             }
00317 
00318             long next = this.writeBuffersToAudioOutput ();
00319             
00320             if ( next < 1 ) {
00321                 next = FRAME_INTERVAL;
00322             }
00323             
00324             try {
00325                 Thread.sleep( next );
00326             } catch( Throwable e ) {
00327                 /* ignored */
00328             }
00329         }
00330         
00331         Log.trace( "Thread completed" );
00332         
00333         this.audioPlayerThread = null;
00334     }
00335 
00336     /**
00337      *  Writes de-jittered audio frames to audio output
00338      */
00339     private long writeBuffersToAudioOutput () 
00340     {
00341         if ( this.sourceDataLine == null ) {
00342             return 0;
00343         }
00344         
00345         int top = this.jitBufPut;
00346         if ( top - this.jitBufGet > this.playBuffer.length ) 
00347         {
00348             if ( this.jitBufGet == 0 ) {
00349                 this.jitBufGet = top;
00350             } else {
00351                 this.jitBufGet = top - this.playBuffer.length / 2;
00352             }
00353         }
00354 
00355         if ( ! this.playerIsEnabled )
00356         {
00357             /* We start playing once the buffers are half full; FRAME_COUNT is the
00358              * usable buffer capacity, and the actual size is twice that to keep history for AEC.
00359              */
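                  /* With FRAME_COUNT = 10 and LLBS = 6 this threshold is 8 frames,
                   * i.e. about 160 ms of buffered audio before playback starts.
                   */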
00360             if ( top - this.jitBufGet >= ( FRAME_COUNT + LLBS ) / 2 ) 
00361             {
00362                 startPlay ();
00363 
00364                 this.jitBufFirst = true;
00365             }
00366             else 
00367             {
00368                 return FRAME_INTERVAL;
00369             }
00370         }
00371 
00372         int sz = 320;
00373         boolean fudgeSynch = true;
00374         int frameSize = this.sourceDataLine.getFormat().getFrameSize ();
00375 
00376         for ( ; this.jitBufGet <= top; ++this.jitBufGet ) 
00377         {
00378             AudioBuffer ab = this.playBuffer[ this.jitBufGet % this.playBuffer.length ];
00379             byte[] obuff = ab.getByteArray ();
00380             int avail = this.sourceDataLine.available() / (obuff.length + 2);
00381             sz = obuff.length;
00382 
00383             /* Take packet this.jitBufGet if available
00384              * Dejitter capacity: top - this.jitBufGet
00385              */
00386             if ( avail > 0 ) 
00387             {
00388                 if ( ! ab.isWritten () ) // Missing packet
00389                 {
00390                     /* Flag indicating whether we decide to conceal 
00391                      * vs to wait for missing data
00392                      */
00393                     boolean concealMissingBuffer = false;
00394                     
00395                     if ( avail > LLBS - 2 ) {
00396                         // Running out of sound
00397                         concealMissingBuffer = true;
00398                     }
00399                     if ( ( top - this.jitBufGet ) >= ( this.playBuffer.length - 2 ) ) {
00400                         // Running out of buffers
00401                         concealMissingBuffer = true;
00402                     }
00403                     if ( this.jitBufGet == 0 ) {
00404                         // No data to conceal with
00405                         concealMissingBuffer = false;
00406                     }
00407                     
00408                     /* Now conceal missing data or wait for it
00409                      */
00410                     if ( concealMissingBuffer ) {
00411                         concealMissingDataForAudioOutput(this.jitBufGet);
00412                     } else {
00413                         break; // Waiting for missing data
00414                     }
00415                 }
00416 
00417                 int start = 0;
00418                 int len = obuff.length;
00419                 
00420                 /* We do adjustments only if we have a timing reference from mic
00421                  */
00422                 if ( fudgeSynch && this.lastMicTimestamp > 0 
00423                         && this.lastMicTimestamp != Long.MAX_VALUE) 
00424                 {
00425                     /* Only one adjustment per call, because we depend on this.lastMicTimestamp
00426                      */
00427                     fudgeSynch = false;
00428                     long delta = ab.getTimestamp() - this.lastMicTimestamp;
00429 
00430                     if ( this.jitBufFirst ) 
00431                     {
00432                         this.deltaTimePlayerMinusMic = delta;
00433                         this.jitBufFirst = false;
00434                     }
00435                     else 
00436                     {
00437                         /* If diff is positive, this means that the source clock is 
00438                          * running faster than the audio clock so we lop a few bytes
00439                          * off and make a note of the fudge factor.
00440                          * If diff is negative, this means the audio clock is faster 
00441                          * than the source clock so we make up a couple of samples
00442                          * and note down the fudge factor.
00443                          */
00444                         int diff = (int) ( delta - this.deltaTimePlayerMinusMic );
00445                         
00446                         /* We expect the output buffer to be full
00447                          */
00448                         int max = (int) Math.round( (LLBS / 2) * FRAME_INTERVAL);
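                              // With LLBS = 6 and FRAME_INTERVAL = 20 ms, max = 60 ms.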
00449                         
00450                         if ( Math.abs(diff) > FRAME_INTERVAL ) {
00451                             // Noticeable clock skew: delta and diff could be logged here
00452                         }
00453 
00454                         if ( diff > max ) {
00455                             start = (diff > (LLBS * FRAME_INTERVAL)) ?
00456                                 frameSize * 2 : frameSize; // panic ?
00457                             len -= start;
00458                             // Snip:  start / frameSize  sample(s)
00459                             this.jitBufFudge -= start / frameSize;
00460                         }
00461                         if (diff < -1 * FRAME_INTERVAL) {
00462                             this.sourceDataLine.write( obuff, 0, frameSize );
00463                             // Paste: added a sample
00464                             this.jitBufFudge += 1;
00465                         }
00466                     }
00467                 }
00468                 
00469                 /* Now write data to audio output and mark audio buffer 'read'
00470                  */
00471                 this.sourceDataLine.write( obuff, start, len );
00472                 this.callLength += FRAME_INTERVAL;
00473 
00474                 ab.setRead ();
00475             }
00476             else // No place for (more?) data in SDLB 
00477             {
00478                 break;
00479             }
00480         }
00481         
00482         long ttd = ( ( sz * LLBS / 2 ) - this.sourceDataLine.available() ) / 8;
00483         return ttd;
00484     }
00485 
00486     /**
00487      *  Conceals missing data in the audio output buffer by averaging
00488      *  the samples from the previous and next buffers.
00489      */
00490     private void concealMissingDataForAudioOutput( int n ) 
00491     {
00492         byte[] target = this.playBuffer[n % this.playBuffer.length].getByteArray();
00493         byte[] prev = this.playBuffer[ (n - 1) % this.playBuffer.length].getByteArray();
00494         byte[] next = this.playBuffer[ (n + 1) % this.playBuffer.length].getByteArray();
00495         
00496         /*  Creates a packet by averaging the corresponding bytes 
00497          *  in the surrounding packets hoping that new packet will sound better 
00498          *  than silence.
00499          *  TODO fix for 16-bit samples
00500          */
00501         for ( int i = 0; i < target.length; ++i ) 
00502         {
00503             target[i] = (byte) ( 0xFF & ( (prev[ i ] >> 1 ) + ( next[i] >> 1 ) ) );
00504         }
00505     }
00506 
00507     /**
00508      *  Writes directly to source line without buffering
00509      */
00510     @Override
00511     public void writeDirectly(byte[] buff) 
00512     {
00513         if ( this.sourceDataLine == null ) {
00514             return;
00515         }
00516         
00517         this.sourceDataLine.write( buff, 0, buff.length );
00518     }
00519 
00520     /**
00521      *  Enqueues a packet into the de-jitter buffer for playing.
00522      */
00523     @Override
00524     public void writeBuffered( byte[] buff, long timestamp ) throws IOException 
00525     {
00526         if ( this.sourceDataLine == null ) {
00527             return;
00528         }
00529         
00530         int fno = (int) ( timestamp / (AudioInterfacePCM.FRAME_INTERVAL ) );
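              // One frame per FRAME_INTERVAL (20 ms) of timestamp; fno is used modulo
              // playBuffer.length as a circular index into the de-jitter buffer.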
00531 
00532         AudioBuffer ab = this.playBuffer[ fno % this.playBuffer.length ];
00533         byte nbuff[] = ab.getByteArray ();
00534         
00535         if ( propertyStereoRec )
00536         {
00537             for ( int i = 0; i < nbuff.length / 4; ++i )
00538             {
00539                 nbuff[i * 4] = 0; // Left silent
00540                 nbuff[i * 4 + 1] = 0; // Left silent
00541                 nbuff[i * 4 + 2] = buff[i * 2];
00542                 nbuff[i * 4 + 3] = buff[i * 2 + 1];
00543             }
00544         }
00545         else 
00546         {
00547             System.arraycopy( buff, 0, nbuff, 0, nbuff.length );
00548         }
00549         
00550         ab.setWritten ();
00551         ab.setTimestamp( timestamp );
00552         
00553         this.jitBufPut = fno;
00554     }
00555 
00556     //////////////////////////////////////////////////////////// VOICE PDU SENDER ////////
00557     
00558     /**
00559      *  Sends audio frames to UDP channel at regular intervals (ticks)
00560      */
00561     private void pduSenderWorker () 
00562     {
00563         Log.trace( "Thread started" );
00564         
00565         long set = 0;
00566         long last, point = 0;
00567         boolean audioTime = false;
00568 
00569         while( this.audioSenderThread != null )
00570         {
00571             if ( this.targetDataLine == null ) {
00572                 break;
00573             }
00574 
00575             /* Advance the nominal (expected) send time by one frame interval
00576              */
00577             point += FRAME_INTERVAL;
00578             
00579             /* Delta time
00580              */
00581             long delta = point - set + FRAME_INTERVAL;
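                  /* Sleep time for this tick: nominally FRAME_INTERVAL, corrected by how far
                   * the expected position (point) is ahead of the audio clock position (set).
                   */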
00582             
00583             if ( this.targetDataLine.isActive () ) 
00584             {
00585                 if ( ! audioTime ) // Take care of "discontinuous time"
00586                 {
00587                     audioTime = true;
00588                     set = this.targetDataLine.getMicrosecondPosition() / 1000;
00589                     last = point = set;
00590                 }
00591             }
00592             else 
00593             {
00594                 point = 0;
00595                 delta = FRAME_INTERVAL; // Running before the TargetDataLine is active
00596                 set = System.currentTimeMillis (); // For ring cadence
00597                 audioTime = false;
00598             }
00599             
00600             sendAudioFrame( set );
00601             
00602             // If we are late, set is larger than last so we sleep less
00603             // If we are early, set is smaller than last and we sleep longer
00604             //
00605             if ( delta > 1 ) // Only sleep if it is worth it...
00606             {
00607                 try {
00608                     Thread.sleep( delta );
00609                 } catch( InterruptedException e ) {
00610                     /* ignored */
00611                 }
00612             }
00613 
00614             last = set;
00615             
00616             if ( audioTime ) {
00617                 set = this.targetDataLine.getMicrosecondPosition() / 1000;
00618             }
00619             
00620             if ( point > 0 ) {
00621                 Log.audio( "Ticker: slept " + delta + " from " + last + ", now " + set );
00622             }
00623         }
00624         
00625         Log.trace( "Thread completed" );
00626 
00627         this.audioSenderThread = null;
00628     }
00629 
00630     /**
00631      *  Called every FRAME_INTERVAL ms to send an audio frame
00632      */
00633     private void sendAudioFrame( long set )
00634     {
00635         if ( this.audioSender == null ) {
00636             return;
00637         }
00638 
00639         try {
00640             this.audioSender.send ();
00641         } catch( IOException e ) {
00642             Log.exception( Log.WARN, e );
00643         }
00644     }
00645 
00646     //////////////////////////////////////////////////////////// AUDIO INPUT /////////////
00647     
00648     /**
00649      *  Records audio samples from the microphone
00650      */
00651     private void micRecorderWorker () 
00652     {
00653         Log.trace( "Thread started" );
00654         
00655         while( this.micRecorderThread != null ) 
00656         {
00657             if ( this.targetDataLine == null ) {
00658                 break;
00659             }
00660 
00661             micDataRead ();
00662         }
00663         
00664         Log.trace( "Thread stopped" );
00665         
00666         this.micRecorderThread = null;
00667     }
00668 
00669     /**
00670      *  Called from micRecorder to record audio samples from microphone.
00671      *  Blocks as needed.
00672      */
00673     private void micDataRead () 
00674     {
00675         try 
00676         {
00677             int fresh = this.micBufPut % this.recordBuffer.length;
00678             AudioBuffer ab = this.recordBuffer[ fresh ];
00679             byte[] buff = ab.getByteArray ();
00680             
00681             this.targetDataLine.read( buff, 0, buff.length );
00682             
00683             long stamp = this.targetDataLine.getMicrosecondPosition () / 1000;
00684             if ( stamp >= this.lastMicTimestamp )
00685             {
00686                 if ( ab.isWritten () ) {
00687                     // Overrun: this buffer still holds unread audio data
00688                 }
00689 
00690                 ab.setTimestamp( stamp );
00691                 ab.setWritten (); // should test for overrun ???
00692                 
00693                 /* Put the timestamped audio data into buffer slot 'fresh'
00694                  * and advance the put index below.
00695                  */
00696                 
00697                 ++this.micBufPut;
00698             }
00699             else // Seen at second and subsequent activations, garbage data 
00700             {
00701                 /* Drop audio data */
00702             }
00703             
00704             this.lastMicTimestamp = stamp;
00705         }
00706         catch( Exception e ) 
00707         {
00708             /* ignored */
00709         }
00710     }
00711 
00712     /**
00713      *  Reads from the microphone into the buffer provided,
00714      *  but <em>only</em> filling getSampleSize() bytes.
00715      *  Returns the timestamp of the sample from the audio clock.
00716      *
00717      *  @param  buff audio samples
00718      *  @return the timestamp of the sample from the audio clock
00719      *  @exception IOException declared by the interface; not thrown by this implementation
00720      */
00721     @Override
00722     public long readWithTimestamp( byte[] buff ) throws IOException
00723     {
00724         int micnext = this.micBufGet % this.recordBuffer.length;
00725         int buffCap = (this.micBufPut - this.micBufGet ) % this.recordBuffer.length;
00726         long timestamp = 0;
00727         
00728         Log.audio( "Getting audio data from buffer " + micnext + "/" + buffCap );
00729 
00730         AudioBuffer ab = this.recordBuffer[ micnext ];
00731         if ( ab.isWritten () 
00732                 && ( this.micBufGet > 0 || buffCap >= this.recordBuffer.length / 2 ) ) 
00733         {
00734             timestamp = ab.getTimestamp ();
00735             resample( ab.getByteArray(), buff );
00736             ab.setRead ();
00737 
00738             ++this.micBufGet;
00739         }
00740         else 
00741         {
00742             System.arraycopy( this.silenceSamples, 0, buff, 0, buff.length );
00743             Log.audio( "Sending silence" );
00744             timestamp = ab.getTimestamp (); // or should we warn them ??
00745         }
00746 
00747         return timestamp;
00748     }
00749 
00750     /**
00751      *  Simple PCM down sampler.
00752      *
00753      * @param src   source buffer with audio samples
00754      * @param dest  destination buffer with audio samples
00755      */
00756     private void resample( byte[] src, byte[] dest ) 
00757     {
00758         if ( src.length == dest.length ) 
00759         {
00760             /* Nothing to down sample; copy samples as-is to destination
00761              */
00762             System.arraycopy( src, 0, dest, 0, src.length );
00763             return;
00764         }
00765         else if ( src.length / 2 == dest.length )
00766         {
00767             /* Source is stereo, send the left channel
00768              */
00769             for ( int i = 0; i < dest.length / 2; i++ ) 
00770             {
00771                 dest[i * 2] = src[i * 4];
00772                 dest[i * 2 + 1] = src[i * 4 + 1];
00773             }
00774             return;
00775         }
00776 
00777         /* Now the real work. We assume 44.1 kHz 16-bit input and down-sample
00778          * it to 8 kHz (not very cleverly: no anti-aliasing etc.).
00779          */
00780         OctetBuffer srcBuffer = OctetBuffer.wrap( src );
00781         OctetBuffer destBuffer = OctetBuffer.wrap( dest );
00782         
00783         /* Iterate over the source samples, add each to the target bucket it falls into,
00784          * and count how many samples land in each bucket.
00785          */
00786         int drange = dest.length / 2;
00787         double v[] = new double[ drange ];
00788         double w[] = new double[ drange ];
00789 
00790         double frequencyRatio = 8000.0 / 44100.0;
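              // Each source sample index maps to destination bucket floor(i * 8000/44100);
              // samples that land in the same bucket are summed and then averaged below.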
00791         int top = src.length / 2;
00792         for ( int eo = 0; eo < top; ++eo ) 
00793         {
00794             int samp = (int) Math.floor( eo * frequencyRatio );
00795             if ( samp >= drange ) {
00796                 samp = drange - 1;
00797             }
00798             v[ samp ] += srcBuffer.getShort( eo * 2 );
00799             w[ samp ]++;
00800         }
00801         
00802         /* Now re-weight (average) the samples to avoid volume quirks and convert to short
00803          */
00804         short vw = 0;
00805         for ( int ei = 0; ei < drange; ++ei ) 
00806         {
00807             if ( w[ei] != 0 ) {
00808                 vw = (short) ( v[ei] / w[ei] );
00809             }
00810             destBuffer.putShort( ei * 2, vw );
00811         }
00812     }
00813 
00814     //////////////////////////////////////////////////////////// RINGER //////////////////
00815     
00816     /**
00817      *  Writes ring signal samples to audio output
00818      */
00819     private void ringerWorker ()
00820     {
00821         Log.trace( "Thread started" );
00822         
00823         while( this.ringerThread != null )
00824         {
00825             if ( this.sourceDataLine == null ) {
00826                 break;
00827             }
00828 
00829             long nap = 100; // default sleep in millis when idle
00830             
00831             if ( this.providingRingBack ) 
00832             {
00833                 nap = 0;
00834                 while( nap < FRAME_INTERVAL ) 
00835                 {
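                          /* Ring cadence: ringTimer advances roughly once per 20 ms frame,
                           * giving a period of about 120 * 20 ms = 2.4 s, with tone during
                           * the first 40 frames (~0.8 s) and silence for the rest.
                           */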
00836                     boolean inRing = ( ( this.ringTimer++ % 120 ) < 40 );
00837                     if ( inRing ) {
00838                         nap = this.writeDirectIfAvail( this.ringSamples );
00839                     } else {
00840                         nap = this.writeDirectIfAvail( this.silenceSamples );
00841                     }
00842                 }
00843             }
00844 
00845             try {
00846                 Thread.sleep( nap );
00847             } catch( InterruptedException ex ) {
00848                 /* ignored */
00849             }
00850         }
00851         
00852         Log.trace( "Thread completed" );
00853         
00854         this.ringerThread = null;
00855     }
00856 
00857     /**
00858      *  Writes audio samples to audio output directly (without using jitter buffer).
00859      *  
00860      *  @return milliseconds to sleep (after which time next write should occur)
00861      */
00862     private long writeDirectIfAvail( byte[] samples ) 
00863     {
00864         if ( this.sourceDataLine == null ) {
00865             return 0;
00866         }
00867 
00868         if ( this.sourceDataLine.available () > samples.length ) {
00869             this.sourceDataLine.write( samples, 0, samples.length );
00870         }
00871 
00872         long nap = ( samples.length * 2 - this.sourceDataLine.available () ) / 8;
00873         return nap;
00874     }
00875 
00876     /**
00877      *  Initializes the ringer samples (ring signal and silence)
00878      */
00879     private void initializeRingerSamples ()
00880     {
00881         /* First generate silence
00882          */
00883         int numOfSamples = this.getSampleSize ();
00884         this.silenceSamples = new byte[ numOfSamples ];
00885         
00886         /* Now generate ringing tone as two superimposed frequencies.
00887          */
00888         double freq1 =  25.0 / 8000;
00889         double freq2 = 420.0 / 8000;
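              // Normalized to the 8 kHz sample rate: freq1 corresponds to 25 Hz and freq2 to 420 Hz.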
00890         
00891         OctetBuffer rbb = OctetBuffer.allocate( numOfSamples );
00892 
00893         for ( int i = 0; i < 160; ++i )
00894         {
00895             short s = (short) ( Short.MAX_VALUE
00896                                * Math.sin( 2.0 * Math.PI * freq1 * i )
00897                                * Math.sin( 4.0 * Math.PI * freq2 * i )
00898                                / 4.0  /* signal level ~= -12 dB */
00899                                );
00900             rbb.putShort( s );
00901         }
00902         
00903         this.ringSamples = rbb.getStore ();
00904         
00905     }
00906 
00907     //////////////////////////////////////////////////////////// AUDIO OUTPUT ////////////
00908     
00909     /**
00910      *  Get audio output. Initializes source data line.
00911      */
00912     private boolean getAudioOut () 
00913     {
00914         this.sourceDataLine = null;
00915         boolean succeded = false;
00916         
00917         AudioFormat af;
00918         String name;
00919         if ( propertyStereoRec ) {
00920             af = this.stereo8k;
00921             name = "stereo8k";
00922         } else {
00923             af = this.mono8k;
00924             name = "mono8k";
00925         }
00926 
00927         int buffsz = (int) Math.round( LLBS * af.getFrameSize() * af.getFrameRate() *
00928                                       FRAME_INTERVAL / 1000.0 );
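              // e.g. 6 * 2 * 8000 * 20 / 1000 = 1920 octets for mono8k (3840 for stereo8k).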
00929         
00930         if ( propertyBigBuff ) {
00931             buffsz *= 2.5;
00932         }
00933         
00934         /* We want to do tricky stuff on the 8k mono stream before
00935          * play back, so we accept no other sort of line.
00936          */
00937         String pref = propertyOutputDeviceName;
00938         SourceDataLine sdl = findSourceDataLineByPref( pref, af, name, buffsz );
00939         if ( sdl != null )
00940         {
00941             int outputBufferSize = (int) ( af.getFrameRate() * af.getFrameSize() / 50.0 );
00942             
00943             Log.trace( "Output Buffer Size = " + outputBufferSize );
00944 
00945             for ( int i = 0; i < this.playBuffer.length; ++i ) {
00946                 this.playBuffer[i] = new AudioBuffer( outputBufferSize );
00947             }
00948 
00949             this.sourceDataLine = sdl;
00950             succeded = true;
00951         }
00952         else
00953         {
00954             Log.warn( "No audio output device available" );
00955         }
00956 
00957         return succeded;
00958     }
00959 
00960     /**
00961      *  Gets audio input. Initializes the target data line.
00962      */
00963     private boolean getAudioIn ()
00964     {
00965         this.targetDataLine = null;
00966         boolean succeded = false;
00967 
00968         /* Make a list of formats we can live with 
00969          */
00970         String names[] = { "mono8k", "mono44k" };
00971         AudioFormat[] afsteps = { this.mono8k, this.mono44k };
00972         if ( propertyStereoRec )  {
00973             names[0] = "stereo8k";
00974             afsteps[0] = this.stereo8k;
00975         }
00976 
00977         int[] smallbuff = {
00978             (int) Math.round( LLBS * afsteps[0].getFrameSize() * afsteps[0].getFrameRate() 
00979                     * FRAME_INTERVAL / 1000.0 ),
00980             (int) Math.round( LLBS * afsteps[1].getFrameSize() * afsteps[1].getFrameRate() 
00981                     * FRAME_INTERVAL / 1000.0 )
00982             };
00983         
00984         /* If LLBS > 4 then these can be the same (should tweak based on LLBS really).
00985          */
00986         int[] bigbuff = smallbuff;
00987 
00988         /* choose one based on audio properties
00989          */
00990         int[] buff = propertyBigBuff ? bigbuff : smallbuff;
00991 
00992         /* now try and find a device that will do it - and live up to the preferences
00993          */
00994         String pref = propertyInputDeviceName;
00995         int fno = 0;
00996         this.targetDataLine = null;
00997         for ( ; fno < afsteps.length && this.targetDataLine == null; ++fno ) 
00998         {
00999             this.targetDataLine = findTargetDataLineByPref(pref, 
01000                     afsteps[fno], names[fno], buff[fno] );
01001         }
01002         
01003         if ( this.targetDataLine != null ) 
01004         {
01005             Log.audio( "TargetDataLine =" + this.targetDataLine + ", fno = " + fno );
01006             
01007             AudioFormat af = this.targetDataLine.getFormat ();
01008             
01009             /* now allocate some buffer space in the raw format
01010              */
01011             
01012             int inputBufferSize = (int) ( af.getFrameRate() * af.getFrameSize() / 50.0 );
01013             
01014             Log.trace( "Input Buffer Size = " + inputBufferSize );
01015             
01016             for ( int i = 0; i < this.recordBuffer.length; ++i ) {
01017                 this.recordBuffer[i] = new AudioBuffer( inputBufferSize );
01018             }
01019             
01020             succeded = true;
01021         }
01022         else 
01023         {
01024             Log.warn( "No audio input device available" );
01025         }
01026 
01027         return succeded;
01028     }
01029 
01030     /**
01031      *  Searches for a data line of either sort (source/target) based on the pref string. 
01032      *  Uses type to determine the sort, i.e. Target or Source. 
01033      *  debugInfo parameter is only used in debug printouts to set the context.
01034      */
01035     private DataLine findDataLineByPref( String pref, AudioFormat af,
01036             String name, int sbuffsz, Class<?> lineClass,
01037             String debugInfo ) 
01038     {
01039         DataLine line = null;
01040         DataLine.Info info = new DataLine.Info(lineClass, af);
01041         try 
01042         {
01043             if ( pref == null ) 
01044             {
01045                 line = (DataLine) AudioSystem.getLine( info );
01046             }
01047             else 
01048             {
01049                 Mixer.Info[] mixes = AudioSystem.getMixerInfo();
01050                 for ( int i = 0; i < mixes.length; ++i ) 
01051                 {
01052                     Mixer.Info mixi = mixes[i];
01053                     String mixup = mixi.getName().trim();
01054                     Log.audio( "Mix " + i + " " + mixup );
01055                     if ( mixup.equals( pref ) ) 
01056                     {
01057                         Log.audio( "Found name match for preferred input mixer" );
01058                         
01059                         Mixer preferredMixer = AudioSystem.getMixer( mixi );
01060                         if ( preferredMixer.isLineSupported( info ) ) 
01061                         {
01062                             line = (DataLine) preferredMixer.getLine( info );
01063                             Log.audio( "Got " + debugInfo + " line" );
01064                             break;
01065                         }
01066                         else 
01067                         {
01068                             Log.audio( debugInfo + " format not supported" );
01069                         }
01070                     }
01071                 }
01072             }
01073         }
01074         catch( Exception e )
01075         {
01076             Log.warn( "Unable to get a " + debugInfo + " line of type: " + name );
01077             line = null;
01078         }
01079 
01080         return line;
01081     }
01082 
01083     /**
01084      *  Searches for target data line according to preferences.
01085      */
01086     private TargetDataLine findTargetDataLineByPref( String pref, 
01087             AudioFormat af, String name, int sbuffsz )
01088     {
01089         String debugInfo = "recording";
01090         
01091         TargetDataLine line = (TargetDataLine) findDataLineByPref(pref, af, name, 
01092                 sbuffsz, TargetDataLine.class, debugInfo );
01093         
01094         if ( line != null )
01095         {
01096             try 
01097             {
01098                 line.open( af, sbuffsz );
01099                 Log.audio( "Got a " + debugInfo + " line of type: " + name
01100                         + ", buffer size = " + line.getBufferSize () );
01101             }
01102             catch( LineUnavailableException e ) 
01103             {
01104                 Log.warn( "Unable to get a " + debugInfo + " line of type: " + name );
01105                 line = null;
01106             }
01107         }
01108 
01109         return line;
01110     }
01111 
01112     /**
01113      *  Searches for source data line according to preferences.
01114      */
01115     private SourceDataLine findSourceDataLineByPref( String pref, 
01116             AudioFormat af, String name, int sbuffsz )
01117     {
01118         String debtxt = "play";
01119         
01120         SourceDataLine line = (SourceDataLine) findDataLineByPref( pref, af, name, 
01121                 sbuffsz, SourceDataLine.class, debtxt );
01122         
01123         if ( line != null )
01124         {
01125             try 
01126             {
01127                 line.open( af, sbuffsz );
01128                 Log.audio( "Got a " + debtxt + " line of type: " + name
01129                         + ", buffer size = " + line.getBufferSize () );
01130             }
01131             catch( LineUnavailableException e ) 
01132             {
01133                 Log.warn( "Unable to get a " + debtxt + " line of type: " + name );
01134                 line = null;
01135             }
01136         }
01137         return line;
01138     }
01139 
01140     //////////////////////////////////////////////////////////////////////////////////////
01141 
01142     /**
01143      *  Stops the audio recording worker thread
01144      */
01145     @Override
01146     public void stopRecording () 
01147     {
01148         if ( this.targetDataLine == null ) {
01149             return;
01150         }            
01151 
01152         Log.audio( "Stopped recording" );
01153 
01154         this.targetDataLine.stop ();
01155         this.micRecorderThread = null;
01156         this.audioSender = null;
01157     }
01158 
01159     /**
01160      *  Start the audio recording worker thread
01161      */
01162     @Override
01163     public long startRecording ()
01164     {
01165         if ( this.targetDataLine == null ) {
01166             return 0;
01167         }
01168         
01169         Log.audio( "Started recording" );
01170 
01171         if ( this.targetDataLine.available() > 0 ) 
01172         {
01173             Log.audio( "Flushed recorded data" );
01174             this.targetDataLine.flush ();
01175             this.lastMicTimestamp = Long.MAX_VALUE; // Get rid of spurious samples
01176         } 
01177         else 
01178         {
01179             this.lastMicTimestamp = 0;
01180         }
01181         
01182         this.targetDataLine.start ();
01183 
01184         /* Clear the record buffer pointers 
01185          */
01186         this.micBufPut = this.micBufGet = 0;
01187         for ( int i = 0; i < this.recordBuffer.length; ++i ) {
01188             this.recordBuffer[i].setRead ();
01189         }
01190 
01191         Runnable thread = new Runnable() {
01192             public void run () {
01193                 micRecorderWorker ();
01194             }
01195         };
01196 
01197         this.micRecorderThread = new Thread( thread, "Tick-rec" );
01198         this.micRecorderThread.setDaemon( true );
01199         this.micRecorderThread.setPriority( Thread.MAX_PRIORITY - 1 );
01200         this.micRecorderThread.start ();
01201 
01202         return this.targetDataLine.getMicrosecondPosition() / 1000;
01203     }
01204 
01205     /**
01206      *  Starts audio playback (enables the player and starts the source data line)
01207      */
01208     @Override
01209     public void startPlay () 
01210     {
01211         if ( this.sourceDataLine == null ) {
01212             return;
01213         }
01214         
01215         Log.audio( "Started playing" );
01216         
01217         /* Reset the dejitter buffer
01218          */
01219         this.jitBufPut = 0;
01220         this.jitBufGet = 0;
01221         this.jitBufFudge = 0;
01222         this.callLength = 0;
01223 
01224         this.sourceDataLine.flush ();
01225         this.sourceDataLine.start ();
01226 
01227         this.playerIsEnabled = true;
01228     }
01229 
01230     /**
01231      *  Stops audio playback (disables the player and stops the source data line)
01232      */
01233     public void stopPlay ()
01234     {
01235         /* Reset the buffer
01236          */
01237         this.jitBufPut = 0;
01238         this.jitBufGet = 0;
01239         this.playerIsEnabled = false;
01240 
01241         if ( this.sourceDataLine == null ) {
01242             return;
01243         }
01244 
01245         Log.audio( "Stopped playing" );
01246 
01247         this.sourceDataLine.stop ();
01248     
01249         if ( this.jitBufFudge != 0 )
01250         {
01251             Log.audio( "Total sample skew: " + this.jitBufFudge );
01252             Log.audio( "Percentage: " + (100.0 * this.jitBufFudge / (8 * this.callLength)) );
01253             
01254             this.jitBufFudge = 0;
01255         }
01256         
01257         if ( this.callLength > 0 ) {
01258             Log.trace( "Total call Length: " + this.callLength + " ms" );
01259         }
01260         
01261         this.sourceDataLine.flush ();
01262     }
01263 
01264     /**
01265      *  Gets audio interface by VoicePDU format
01266      *
01267      * @return AudioInterfacePCM
01268      */
01269     @Override
01270     public AudioInterface getByFormat( Integer format ) 
01271     {
01272         AudioInterface ret = null;
01273         switch( format.intValue () ) 
01274         {
01275             case VoicePDU.ALAW:
01276                 ret = new AudioCodecAlaw(this);
01277                 break;
01278             case VoicePDU.ULAW:
01279                 ret = new AudioCodecUlaw(this);
01280                 break;
01281             case VoicePDU.LIN16:
01282                 ret = this;
01283                 break;
01284             default:
01285                 Log.warn( "Invalid format for Audio " + format.intValue () );
01286                 Log.warn( "Forced uLaw " );
01287                 ret = new AudioCodecUlaw(this);
01288                 break;
01289         }
01290         
01291         Log.audio( "Using audio Interface of type : " + ret.getClass().getName() );
01292         
01293         return ret;
01294     }
01295 
01296     /**
01297      *  Starts the ringing signal
01298      */
01299     @Override
01300     public void startRinging ()
01301     {
01302         if ( this.sourceDataLine == null ) {
01303             return;
01304         }
01305 
01306         this.sourceDataLine.flush();
01307         this.sourceDataLine.start();
01308         this.providingRingBack = true;
01309     }
01310 
01311     /**
01312      *  Stops the ringing signal
01313      */
01314     @Override
01315     public void stopRinging () 
01316     {
01317         if ( this.sourceDataLine == null ) {
01318             return;
01319         }
01320 
01321         if ( this.providingRingBack ) 
01322         {
01323             this.providingRingBack = false;
01324             this.ringTimer = -1;
01325             
01326             this.sourceDataLine.stop();
01327             this.sourceDataLine.flush();
01328         }
01329     }
01330 
01331 }
