Real-time audio streaming in Java


I'm implementing real-time streaming from a mic to a Java server on another PC, but all I hear is white noise.

I've attached both the client program and the server program.

Client:

import java.io.IOException;
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.net.SocketException;
import java.net.UnknownHostException;

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.LineUnavailableException;
import javax.sound.sampled.TargetDataLine;

public class Mic 
{
    public byte[] buffer;
    private int port;
    static AudioInputStream ais;

    public static void main(String[] args)
    {
        TargetDataLine line;
        DatagramPacket dgp; 

        AudioFormat.Encoding encoding = AudioFormat.Encoding.PCM_SIGNED;
        float rate = 44100.0f;
        int channels = 2;
        int sampleSize = 16;
        boolean bigEndian = true;
        InetAddress addr;


        AudioFormat format = new AudioFormat(encoding, rate, sampleSize, channels, (sampleSize / 8) * channels, rate, bigEndian);

        DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);
        if (!AudioSystem.isLineSupported(info)) {
            System.out.println("Line matching " + info + " not supported.");
            return;
        }

        try
        {
            line = (TargetDataLine) AudioSystem.getLine(info);

            int buffsize = line.getBufferSize()/5;
            buffsize += 512; 

            line.open(format);

            line.start();   

            int numBytesRead;
            byte[] data = new byte[buffsize];

            addr = InetAddress.getByName("127.0.0.1");
            DatagramSocket socket = new DatagramSocket();
            while (true) {
                   // Read the next chunk of data from the TargetDataLine.
                   numBytesRead =  line.read(data, 0, data.length);
                   // Save this chunk of data.
                   dgp = new DatagramPacket (data,data.length,addr,50005);

                   socket.send(dgp);
                }

        }catch (LineUnavailableException e) {
            e.printStackTrace();
        }catch (UnknownHostException e) {
            // TODO: handle exception
        } catch (SocketException e) {
            // TODO: handle exception
        } catch (IOException e2) {
            // TODO: handle exception
        }
    }
}

And the server side is fine; it works perfectly with an Android client using AudioRecord.

Server:

import java.io.ByteArrayInputStream;
import java.net.DatagramPacket;
import java.net.DatagramSocket;

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.SourceDataLine;

public class Server {

    AudioInputStream audioInputStream;
    static AudioInputStream ais;
    static AudioFormat format;
    static boolean status = true;
    static int port = 50005;
    static int sampleRate = 44100;

    static DataLine.Info dataLineInfo;
    static SourceDataLine sourceDataLine;

    public static void main(String args[]) throws Exception 
    {
        System.out.println("Server started at port:"+port);

        DatagramSocket serverSocket = new DatagramSocket(port);

        /**
         * Formula for lag = (byte_size/sample_rate)*2
         * Byte size 9728 will produce ~ 0.45 seconds of lag. Voice slightly broken.
         * Byte size 1400 will produce ~ 0.06 seconds of lag. Voice extremely broken.
         * Byte size 4000 will produce ~ 0.18 seconds of lag. Voice slightly more broken than 9728.
         */

        byte[] receiveData = new byte[4096];

        format = new AudioFormat(sampleRate, 16, 1, true, false);
        dataLineInfo = new DataLine.Info(SourceDataLine.class, format);
        sourceDataLine = (SourceDataLine) AudioSystem.getLine(dataLineInfo);
        sourceDataLine.open(format);
        sourceDataLine.start();

        //FloatControl volumeControl = (FloatControl) sourceDataLine.getControl(FloatControl.Type.MASTER_GAIN);
        //volumeControl.setValue(1.00f);

        DatagramPacket receivePacket = new DatagramPacket(receiveData, receiveData.length);

        ByteArrayInputStream baiss = new ByteArrayInputStream(receivePacket.getData());

        while (status == true) 
        {
            serverSocket.receive(receivePacket);
            ais = new AudioInputStream(baiss, format, receivePacket.getLength());
            toSpeaker(receivePacket.getData());
        }

        sourceDataLine.drain();
        sourceDataLine.close();
    }

    public static void toSpeaker(byte soundbytes[]) {
        try 
        {
            System.out.println("At the speaker");
            sourceDataLine.write(soundbytes, 0, soundbytes.length);
        } catch (Exception e) {
            System.out.println("Not working in speakers...");
            e.printStackTrace();
        }
    }
}
Michael Eric Oberlin

So I filled the mic buffer with a sine wave (or something that, in a sense, resembles a sine wave), and your program works fine.

So my specific changes were:

package audioclient;

import java.io.*;
import java.net.*;
import java.nio.ByteBuffer;

import javax.sound.sampled.*;

public class Mic {
    public byte[] buffer;
    private int port;
    static AudioInputStream ais;

        public static void main(String[] args) {
        TargetDataLine line;
        DatagramPacket dgp;

        AudioFormat.Encoding encoding = AudioFormat.Encoding.PCM_SIGNED;
        float rate = 44100.0f;
        int channels = 2;
        int sampleSize = 16;
        boolean bigEndian = true;
        InetAddress addr;

        AudioFormat format = new AudioFormat(encoding, rate, sampleSize, channels, (sampleSize / 8) * channels, rate, bigEndian);

        DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);
        if (!AudioSystem.isLineSupported(info)) {
            System.out.println("Line matching " + info + " not supported.");
            return;
        }

        try {
            line = (TargetDataLine) AudioSystem.getLine(info);

            //TOTALLY missed this.
            int buffsize = line.getBufferSize() / 5;
            buffsize += 512;

            line.open(format);

            line.start();

            int numBytesRead;
            byte[] data = new byte[buffsize];

            /*
             * MICK's injection: We have a buffsize of 512; it is best if the frequency
             * evenly fits into this (avoid skips, bumps, and pops). Additionally, 44100 Hz,
             * with two channels and two bytes per sample. That's four bytes; divide
             * 512 by it, you have 128.
             * 
             * 128 samples, 44100 per second; that's a minimum of 344 samples, or 172 Hz.
             * Well within hearing range; slight skip from the uneven division. Maybe
             * bump it up to 689 Hz.
             * 
             * That's a sine wave of shorts, repeated twice for two channels, with a
             * wavelength of 32 samples.
             * 
             * Note: Changed my mind, ignore specific numbers above.
             * 
             */
            {
                final int λ = 16;
                ByteBuffer buffer = ByteBuffer.allocate(λ * 2 * 8); //256 bytes; the loops below fill only the first 128, the rest stay zero
                for(int j = 0; j < 2; j++) {
                    for(double i = 0.0; i < λ; i++) {
                        System.out.println(j + " " + i);
                        //once for each sample
                        //NOTE: λ/i inverts the intended phase argument (i/λ) and is NaN at i == 0,
                        //so the wave comes out mangled, but it still produces an audible tone
                        buffer.putShort((short)(Math.sin(Math.PI * (λ/i)) * Short.MAX_VALUE));
                    }
                }

                data = buffer.array();
            }

            addr = InetAddress.getByName("127.0.0.1");
            try(DatagramSocket socket = new DatagramSocket()) {
                while (true) {
                    for(byte b : data) System.out.print(b + " ");

                    // Read the next chunk of data from the TargetDataLine.
//                  numBytesRead = line.read(data, 0, data.length);

                    for(int i = 0; i < 64; i++) {
                        byte b = data[i];
                        System.out.print(b + " ");
                    }
                    System.out.println();

                    // Save this chunk of data.
                    dgp = new DatagramPacket(data, data.length, addr, 50005);    

                    for(int i = 0; i < 64; i++) {
                        byte b = dgp.getData()[i];
                        System.out.print(b + " ");
                    }
                    System.out.println();

                    socket.send(dgp);
                }
            }

        } catch (LineUnavailableException e) {
            e.printStackTrace();
        } catch (UnknownHostException e) {
            // TODO: handle exception
        } catch (SocketException e) {
            // TODO: handle exception
        } catch (IOException e2) {
            // TODO: handle exception
        }
    }
}

Obviously I misinterpreted it as a 512-byte-long chunk and mangled the sine wave, but the thing is, it produced exactly the sound it was meant to: an ear-numbing hum at a specific pitch.
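
For reference, here's what the tone generation looks like with the phase argument the right way up. This is just a sketch, and the class and method names are mine; note that ByteBuffer writes big-endian by default, which happens to match the client's format.

import java.nio.ByteBuffer;

public class TestTone {
    // Builds 'periods' full cycles of a 16-bit stereo sine wave whose
    // wavelength is 'lambda' samples, i.e. a pitch of 44100 / lambda Hz.
    public static byte[] sineWave(int lambda, int periods) {
        ByteBuffer buffer = ByteBuffer.allocate(lambda * periods * 4); // 2 bytes x 2 channels per sample
        for (int p = 0; p < periods; p++) {
            for (int i = 0; i < lambda; i++) {
                short sample = (short) (Math.sin(2 * Math.PI * i / lambda) * Short.MAX_VALUE);
                buffer.putShort(sample); // left channel
                buffer.putShort(sample); // right channel
            }
        }
        return buffer.array();
    }
}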

With that in mind, I don't suspect the problem is in your code. The first thing I would check is which line your system is actually tapping for audio. Do you have more than one microphone attached? A webcam microphone, perhaps? You may want to check with a utility like PulseAudio Volume Control. If you haven't already verified that the microphone itself works, do that too; they do have a lifespan.
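
If you want to do that check from Java itself, a minimal sketch along these lines (the class name is mine) lists every mixer that can supply a TargetDataLine in the client's format, which should reveal whether a webcam microphone or some other device is also a candidate:

import javax.sound.sampled.*;

public class ListCaptureLines {
    public static void main(String[] args) {
        // The same capture format the client asks for.
        AudioFormat format = new AudioFormat(44100.0f, 16, 2, true, true);
        DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);

        for (Mixer.Info mixerInfo : AudioSystem.getMixerInfo()) {
            Mixer mixer = AudioSystem.getMixer(mixerInfo);
            if (mixer.isLineSupported(info)) {
                // Any of these devices could be the one your capture line is tapping.
                System.out.println(mixerInfo.getName() + ": " + mixerInfo.getDescription());
            }
        }
    }
}

To force a particular device rather than the default, open the line through its mixer with mixer.getLine(info) instead of AudioSystem.getLine(info).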

Scrambling the bits of an audio stream is neither rare nor hard, but I don't see anywhere in your code that could be doing it.

One idea would be to modify the program to try playing the sound locally before sending it to the server. That way you can at least determine whether the problem lies before or after the microphone; a minimal sketch of that wiring follows.
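
Something like this, assuming the default capture and playback devices; it reuses the client's format and cuts UDP out of the loop entirely:

import javax.sound.sampled.*;

public class LocalLoopback {
    public static void main(String[] args) throws Exception {
        // The client's capture format: 44.1 kHz, 16-bit, stereo, big-endian.
        AudioFormat format = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                44100.0f, 16, 2, 4, 44100.0f, true);

        TargetDataLine mic = (TargetDataLine) AudioSystem.getLine(
                new DataLine.Info(TargetDataLine.class, format));
        SourceDataLine speaker = (SourceDataLine) AudioSystem.getLine(
                new DataLine.Info(SourceDataLine.class, format));

        mic.open(format);
        speaker.open(format);
        mic.start();
        speaker.start();

        byte[] data = new byte[4096];
        while (true) {
            int n = mic.read(data, 0, data.length); // capture a chunk
            speaker.write(data, 0, n);              // play back only the bytes actually read
        }
    }
}

If the loopback already sounds like white noise, the problem is on the capture side; if it sounds clean, look at what happens to the bytes between the socket and the server's SourceDataLine.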
