
I am trying to capture audio from the microphone and save it to a file, but it does not work: with the code below I can only play back the Ogg file passed on the command line. How can I open the microphone with GStreamer and buffer the captured audio, or dump it to a file as raw data or as Ogg/Vorbis? When I speak, nothing gets saved.

#include <gst/gst.h> 
#include <glib.h> 


static gboolean 
bus_call (GstBus  *bus, 
      GstMessage *msg, 
      gpointer data) 
{ 
    GMainLoop *loop = (GMainLoop *) data; 

    switch (GST_MESSAGE_TYPE (msg)) 
    { 

    case GST_MESSAGE_EOS: 
     g_print ("End of stream\n"); 
     g_main_loop_quit (loop); 
     break; 

    case GST_MESSAGE_ERROR: { 
     gchar *debug; 
     GError *error; 

     gst_message_parse_error (msg, &error, &debug); 
     g_free (debug); 

     g_printerr ("Error: %s\n", error->message); 
     g_error_free (error); 

     g_main_loop_quit (loop); 
     break; 
    } 
    default: 
     break; 
    } 

    return TRUE; 
} 


static void 
on_pad_added (GstElement *element, 
       GstPad  *pad, 
       gpointer data) 
{ 
    GstPad *sinkpad; 
    GstElement *decoder = (GstElement *) data; 

    /* We can now link this pad with the vorbis-decoder sink pad */ 
    g_print ("Dynamic pad created, linking demuxer/decoder\n"); 

    sinkpad = gst_element_get_static_pad (decoder, "sink"); 

    gst_pad_link (pad, sinkpad); 

    gst_object_unref (sinkpad); 
} 



int 
main (int argc, 
     char *argv[]) 
{ 
    GMainLoop *loop; 

    GstElement *pipeline, *source, *demuxer, *decoder, *conv, *sink; 
    GstBus *bus; 

    /* Initialisation */ 
    gst_init (&argc, &argv); 

    loop = g_main_loop_new (NULL, FALSE); 


    /* Check input arguments */ 
    if (argc != 2) { 
    g_printerr ("Usage: %s <Ogg/Vorbis filename>\n", argv[0]); 
    return -1; 
    } 


    /* Create gstreamer elements */ 
    pipeline = gst_pipeline_new ("audio-player"); 
    source = gst_element_factory_make ("filesrc",  "file-source"); 
    demuxer = gst_element_factory_make ("oggdemux",  "ogg-demuxer"); 
    decoder = gst_element_factory_make ("vorbisdec",  "vorbis-decoder"); 
    conv  = gst_element_factory_make ("audioconvert", "converter"); 
    sink  = gst_element_factory_make ("autoaudiosink", "audio-output"); 

    if (!pipeline || !source || !demuxer || !decoder || !conv || !sink) { 
    g_printerr ("One element could not be created. Exiting.\n"); 
    return -1; 
    } 

    /* Set up the pipeline */ 

    /* we set the input filename to the source element */ 
    g_object_set (G_OBJECT (source), "location", argv[1], NULL); 

    /* we add a message handler */ 
    bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline)); 
    gst_bus_add_watch (bus, bus_call, loop); 
    gst_object_unref (bus); 

    /* we add all elements into the pipeline */ 
    /* file-source | ogg-demuxer | vorbis-decoder | converter | audio-output */ 
    gst_bin_add_many (GST_BIN (pipeline), 
        source, demuxer, decoder, conv, sink, NULL); 

    /* we link the elements together */ 
    /* file-source -> ogg-demuxer ~> vorbis-decoder -> converter -> audio-output */ 
    gst_element_link (source, demuxer); 
    gst_element_link_many (decoder, conv, sink, NULL); 
    g_signal_connect (demuxer, "pad-added", G_CALLBACK (on_pad_added), decoder); 

    /* note that the demuxer will be linked to the decoder dynamically. 
    The reason is that Ogg may contain various streams (for example 
    audio and video). The source pad(s) will be created at run time, 
    by the demuxer when it detects the amount and nature of streams. 
    Therefore we connect a callback function which will be executed 
    when the "pad-added" signal is emitted. */ 


    /* Set the pipeline to "playing" state*/ 
    g_print ("Now playing: %s\n", argv[1]); 
    gst_element_set_state (pipeline, GST_STATE_PLAYING); 


    /* Iterate */ 
    g_print ("Running...\n"); 
    g_main_loop_run (loop); 


    /* Out of the main loop, clean up nicely */ 
    g_print ("Returned, stopping playback\n"); 
    gst_element_set_state (pipeline, GST_STATE_NULL); 

    g_print ("Deleting pipeline\n"); 
    gst_object_unref (GST_OBJECT (pipeline)); 

    return 0; 
} 

What is the question?

On Linux with PulseAudio, it is as simple as:

$ gst-launch pulsesrc ! filesink location=dump.raw 
$ gst-launch pulsesrc ! audioconvert ! vorbisenc ! oggmux ! filesink location=dump.ogg 
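
If it has to be done from C like the program in the question, here is a minimal, untested sketch of the same recording pipeline built with gst_parse_launch(); the dump.ogg filename and the fixed 10-second recording time are only placeholders for illustration:

#include <gst/gst.h>
#include <glib.h>

int
main (int argc, char *argv[])
{
    GstElement *pipeline;
    GstBus *bus;
    GstMessage *msg;
    GError *error = NULL;

    gst_init (&argc, &argv);

    /* pulsesrc captures the default PulseAudio input (the microphone). */
    pipeline = gst_parse_launch (
        "pulsesrc ! audioconvert ! vorbisenc ! oggmux ! "
        "filesink location=dump.ogg", &error);
    if (pipeline == NULL) {
        g_printerr ("Parse error: %s\n", error->message);
        g_error_free (error);
        return -1;
    }

    gst_element_set_state (pipeline, GST_STATE_PLAYING);

    /* Record for 10 seconds, then send EOS so oggmux can finish the file. */
    g_usleep (10 * G_USEC_PER_SEC);
    gst_element_send_event (pipeline, gst_event_new_eos ());

    /* Wait until the EOS (or an error) reaches the bus before shutting down. */
    bus = gst_element_get_bus (pipeline);
    msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
        GST_MESSAGE_EOS | GST_MESSAGE_ERROR);
    if (msg != NULL)
        gst_message_unref (msg);
    gst_object_unref (bus);

    gst_element_set_state (pipeline, GST_STATE_NULL);
    gst_object_unref (pipeline);
    return 0;
}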

Hahaha. Genius, it works! You just answered the question. But unfortunately I have to write a cross-platform application. If it is not Linux, how can I do the same thing? – YumYumYum

@Stackfan, you can use 'autoaudiosrc' instead of 'pulsesrc' (I hope) –
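
Building on that suggestion, a rough, untested sketch in the style of the question's program, assembling the recording pipeline element by element with autoaudiosrc (which picks a platform-appropriate capture element); the dump.ogg location is again just an example. Recording needs no dynamic pads, so everything can be linked statically:

#include <gst/gst.h>
#include <glib.h>

int
main (int argc, char *argv[])
{
    GMainLoop *loop;
    GstElement *pipeline, *source, *conv, *enc, *mux, *sink;

    gst_init (&argc, &argv);
    loop = g_main_loop_new (NULL, FALSE);

    pipeline = gst_pipeline_new ("audio-recorder");
    source = gst_element_factory_make ("autoaudiosrc",  "mic-source");
    conv   = gst_element_factory_make ("audioconvert",  "converter");
    enc    = gst_element_factory_make ("vorbisenc",     "vorbis-encoder");
    mux    = gst_element_factory_make ("oggmux",        "ogg-muxer");
    sink   = gst_element_factory_make ("filesink",      "file-output");

    if (!pipeline || !source || !conv || !enc || !mux || !sink) {
        g_printerr ("One element could not be created. Exiting.\n");
        return -1;
    }

    g_object_set (G_OBJECT (sink), "location", "dump.ogg", NULL);

    gst_bin_add_many (GST_BIN (pipeline),
        source, conv, enc, mux, sink, NULL);

    /* mic -> audioconvert -> vorbisenc -> oggmux -> filesink */
    if (!gst_element_link_many (source, conv, enc, mux, sink, NULL)) {
        g_printerr ("Elements could not be linked. Exiting.\n");
        gst_object_unref (pipeline);
        return -1;
    }

    gst_element_set_state (pipeline, GST_STATE_PLAYING);
    g_print ("Recording to dump.ogg...\n");

    /* A real application would quit this loop (e.g. from a signal handler),
       send an EOS event and wait for it on the bus, so that oggmux can
       write the end of the Ogg file before the pipeline is torn down. */
    g_main_loop_run (loop);

    gst_element_set_state (pipeline, GST_STATE_NULL);
    gst_object_unref (pipeline);
    return 0;
}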

You can also use the following pipeline:

gst-launch osssrc device=<mic i/p dev> ! audioconvert ! vorbisenc ! oggmux ! filesink location=dump.ogg