1 Speech Recognition Theory
2 Commonly Used Speech Packages
pocketsphinx: integrates code from the CMU Sphinx and Festival open-source projects to provide speech recognition.
audio_common: provides text-to-speech functionality, so the robot can "speak" (see the sketch after this list).
AIML (Artificial Intelligence Markup Language): an XML dialect for creating natural-language software agents.
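As a quick illustration of the audio_common route, the following is a minimal sketch (an assumption of these notes, not part of the course package) that uses the sound_play client API to make the robot speak. It assumes the sound_play package from audio_common is installed and its soundplay_node.py node is already running:

#include <ros/ros.h>
#include <sound_play/sound_play.h>

int main(int argc, char** argv)
{
    ros::init(argc, argv, "say_hello");     // hypothetical node name for this sketch
    ros::NodeHandle nh;
    sound_play::SoundClient sc;             // publishes sound requests to soundplay_node
    ros::Duration(1.0).sleep();             // give the publisher time to connect
    sc.say("Hello, I am a robot.");         // hand the text to sound_play for TTS playback
    ros::Duration(2.0).sleep();             // keep the node alive while the audio plays
    return 0;
}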
3 The iFlytek SDK
Log in to the official website of the iFlytek Open Platform (http://www.xfyun.cn/), register an account, and download the SDK.
Extract the downloaded SDK archive (the Linux_lat··· file). The sample programs are in the samples folder (for example iat_record_sample); after downloading, each sample is built in its own directory with make.
The build output goes into the bin folder, where you can find the generated executable iat_record_sample.
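For example, a sketch of the build-and-run steps, assuming the layout described above (samples under the extracted SDK root, with the bin folder next to it):

$ cd samples/iat_record_sample
$ make                   # build the sample in place
$ cd ../../bin           # the build drops the executable into the SDK's bin folder
$ ./iat_record_sample    # run the generated executable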
3.1 Prerequisites
Copy the iFlytek SDK library file into the system library directory:
$ sudo cp libmsc.so /usr/lib/libmsc.so   # pick the build matching your processor architecture; the SDK ships the library under libs (x86 and x64)
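If you are not sure which architecture you are on, check it first:

$ uname -m    # x86_64 means use the library under libs/x64, i686 means libs/x86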
The iFlytek SDK is bound to an APPID, and every download gets a different ID, so after swapping in a new SDK you must update the APPID in the code. You can either use the libmsc.so file provided with this course as-is, or replace the APPID in the source with the one from your own download.
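Concretely, the APPID appears in the login parameter string of the example code below (iat_publish.cpp, tts_subscribe.cpp and voice_assistant.cpp); if you use your own SDK download, change it to your own ID. The placeholder <your_appid> here stands for the ID shown on your download page:

const char* login_params = "appid = <your_appid>, work_dir = .";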
3.2 Voice Dictation
iat_publish.cpp
/*
 * Voice dictation (iFly Auto Transform) converts speech into the corresponding text in real time.
 */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include "robot_voice/qisr.h"
#include "robot_voice/msp_cmn.h"
#include "robot_voice/msp_errors.h"
#include "robot_voice/speech_recognizer.h"
#include <iconv.h>

#include "ros/ros.h"
#include "std_msgs/String.h"

#define FRAME_LEN   640
#define BUFFER_SIZE 4096

int wakeupFlag = 0;
int resultFlag = 0;

static void show_result(char *string, char is_over)
{
    resultFlag = 1;
    printf("\rResult: [ %s ]", string);
    if (is_over)
        putchar('\n');
}

static char *g_result = NULL;
static unsigned int g_buffersize = BUFFER_SIZE;

void on_result(const char *result, char is_last)
{
    if (result) {
        size_t left = g_buffersize - 1 - strlen(g_result);
        size_t size = strlen(result);
        if (left < size) {
            g_result = (char*)realloc(g_result, g_buffersize + BUFFER_SIZE);
            if (g_result)
                g_buffersize += BUFFER_SIZE;
            else {
                printf("mem alloc failed\n");
                return;
            }
        }
        strncat(g_result, result, size);
        show_result(g_result, is_last);
    }
}

void on_speech_begin()
{
    if (g_result) {
        free(g_result);
    }
    g_result = (char*)malloc(BUFFER_SIZE);
    g_buffersize = BUFFER_SIZE;
    memset(g_result, 0, g_buffersize);

    printf("Start Listening...\n");
}

void on_speech_end(int reason)
{
    if (reason == END_REASON_VAD_DETECT)
        printf("\nSpeaking done \n");
    else
        printf("\nRecognizer error %d\n", reason);
}

/* demo: recognize the audio from the microphone */
static void demo_mic(const char* session_begin_params)
{
    int errcode;
    int i = 0;

    struct speech_rec iat;
    struct speech_rec_notifier recnotifier = {
        on_result,
        on_speech_begin,
        on_speech_end
    };

    errcode = sr_init(&iat, session_begin_params, SR_MIC, &recnotifier);
    if (errcode) {
        printf("speech recognizer init failed\n");
        return;
    }
    errcode = sr_start_listening(&iat);
    if (errcode) {
        printf("start listen failed %d\n", errcode);
    }
    /* demo 10 seconds recording */
    while (i++ < 10)
        sleep(1);
    errcode = sr_stop_listening(&iat);
    if (errcode) {
        printf("stop listening failed %d\n", errcode);
    }

    sr_uninit(&iat);
}

/* main thread: start/stop recording; query the recognition result.
 * record thread: record callback (data write)
 * helper thread: ui (keystroke detection)
 */
void WakeUp(const std_msgs::String::ConstPtr& msg)
{
    printf("waking up\r\n");
    usleep(700*1000);
    wakeupFlag = 1;
}

int main(int argc, char* argv[])
{
    // Initialize ROS
    ros::init(argc, argv, "voiceRecognition");
    ros::NodeHandle n;
    ros::Rate loop_rate(10);

    // Declare the Publisher and Subscriber
    // Subscribe to the signal that wakes up speech recognition
    ros::Subscriber wakeUpSub = n.subscribe("voiceWakeup", 1000, WakeUp);
    // Publish the recognized text
    ros::Publisher voiceWordsPub = n.advertise<std_msgs::String>("voiceWords", 1000);

    ROS_INFO("Sleeping...");
    int count = 0;
    while (ros::ok())
    {
        // Speech recognition wake-up
        if (wakeupFlag) {
            ROS_INFO("Wakeup...");

            int ret = MSP_SUCCESS;
            const char* login_params = "appid = 594a7b46, work_dir = .";
            const char* session_begin_params =
                "sub = iat, domain = iat, language = zh_cn, "
                "accent = mandarin, sample_rate = 16000, "
                "result_type = plain, result_encoding = utf8";

            ret = MSPLogin(NULL, NULL, login_params);
            if (MSP_SUCCESS != ret) {
                MSPLogout();
                printf("MSPLogin failed , Error code %d.\n", ret);
            }

            printf("Demo recognizing the speech from microphone\n");
            printf("Speak in 10 seconds\n");

            demo_mic(session_begin_params);

            printf("10 sec passed\n");

            wakeupFlag = 0;
            MSPLogout();
        }

        // Speech recognition finished
        if (resultFlag) {
            resultFlag = 0;
            std_msgs::String msg;
            msg.data = g_result;
            voiceWordsPub.publish(msg);
        }

        ros::spinOnce();
        loop_rate.sleep();
        count++;
    }

exit:
    MSPLogout(); // Logout...

    return 0;
}
- Subscriber: receives the voice wake-up signal; when a wake-up message arrives it sets the wakeupFlag variable.
- The main loop then calls the SDK's dictation function; after a successful recognition it sets the resultFlag variable and publishes the recognized string through the Publisher.
Step 1: add the build rules to CMakeLists.txt
add_executable(iat_publish
  src/iat_publish.cpp
  src/speech_recognizer.c
  src/linuxrec.c
)
target_link_libraries(iat_publish
  ${catkin_LIBRARIES}
  libmsc.so -ldl -lpthread -lm -lrt -lasound
)
The build links against the SDK's libmsc.so library; since it was copied into the system path earlier, no full path is needed here.
Step 2: run the voice dictation example
$ roscore
$ rosrun robot_voice iat_publish
$ rostopic pub /voiceWakeup std_msgs/String "data: 'any string'"
The content of 'any string' can be anything; the message only serves as the wake-up signal.
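To watch the recognized text being published, you can additionally echo the output topic in another terminal:

$ rostopic echo /voiceWords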
3.3 Speech Synthesis
Text To Speech (TTS) automatically converts arbitrary text into continuous natural speech in real time. It is an efficient and convenient way to deliver voice information to anyone, anytime, anywhere, well suited to the information age's demands for massive data, dynamic updates, and personalized queries.
tts_subscribe.cpp
/*
 * Text To Speech (TTS) automatically converts arbitrary text into continuous
 * natural speech in real time, delivering voice information to anyone,
 * anytime, anywhere.
 */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>

#include "robot_voice/qtts.h"
#include "robot_voice/msp_cmn.h"
#include "robot_voice/msp_errors.h"

#include "ros/ros.h"
#include "std_msgs/String.h"
#include <sstream>
#include <sys/types.h>
#include <sys/stat.h>

/* wav audio header format */
typedef struct _wave_pcm_hdr
{
    char        riff[4];            // = "RIFF"
    int         size_8;             // = FileSize - 8
    char        wave[4];            // = "WAVE"
    char        fmt[4];             // = "fmt "
    int         fmt_size;           // = size of the next structure: 16
    short int   format_tag;         // = PCM : 1
    short int   channels;           // = number of channels: 1
    int         samples_per_sec;    // = sample rate: 8000 | 6000 | 11025 | 16000
    int         avg_bytes_per_sec;  // = bytes per second: samples_per_sec * bits_per_sample / 8
    short int   block_align;        // = bytes per sample point: wBitsPerSample / 8
    short int   bits_per_sample;    // = quantization bits: 8 | 16
    char        data[4];            // = "data"
    int         data_size;          // = raw data length: FileSize - 44
} wave_pcm_hdr;

/* default wav audio header */
wave_pcm_hdr default_wav_hdr =
{
    { 'R', 'I', 'F', 'F' },
    0,
    { 'W', 'A', 'V', 'E' },
    { 'f', 'm', 't', ' ' },
    16,
    1,
    1,
    16000,
    32000,
    2,
    16,
    { 'd', 'a', 't', 'a' },
    0
};

/* text synthesis */
int text_to_speech(const char* src_text, const char* des_path, const char* params)
{
    int          ret          = -1;
    FILE*        fp           = NULL;
    const char*  sessionID    = NULL;
    unsigned int audio_len    = 0;
    wave_pcm_hdr wav_hdr      = default_wav_hdr;
    int          synth_status = MSP_TTS_FLAG_STILL_HAVE_DATA;

    if (NULL == src_text || NULL == des_path) {
        printf("params is error!\n");
        return ret;
    }
    fp = fopen(des_path, "wb");
    if (NULL == fp) {
        printf("open %s error.\n", des_path);
        return ret;
    }
    /* begin synthesis */
    sessionID = QTTSSessionBegin(params, &ret);
    if (MSP_SUCCESS != ret) {
        printf("QTTSSessionBegin failed, error code: %d.\n", ret);
        fclose(fp);
        return ret;
    }
    ret = QTTSTextPut(sessionID, src_text, (unsigned int)strlen(src_text), NULL);
    if (MSP_SUCCESS != ret) {
        printf("QTTSTextPut failed, error code: %d.\n", ret);
        QTTSSessionEnd(sessionID, "TextPutError");
        fclose(fp);
        return ret;
    }
    printf("正在合成 ...\n");
    fwrite(&wav_hdr, sizeof(wav_hdr), 1, fp);  // write the wav header, sample rate 16000
    while (1)
    {
        /* fetch the synthesized audio */
        const void* data = QTTSAudioGet(sessionID, &audio_len, &synth_status, &ret);
        if (MSP_SUCCESS != ret)
            break;
        if (NULL != data) {
            fwrite(data, audio_len, 1, fp);
            wav_hdr.data_size += audio_len;    // accumulate data_size
        }
        if (MSP_TTS_FLAG_DATA_END == synth_status)
            break;
        printf(">");
        usleep(150*1000);                      // avoid hogging the CPU
    } // for the possible values of synth_status see the iFlytek Voice Cloud API documentation
    printf("\n");
    if (MSP_SUCCESS != ret) {
        printf("QTTSAudioGet failed, error code: %d.\n", ret);
        QTTSSessionEnd(sessionID, "AudioGetError");
        fclose(fp);
        return ret;
    }
    /* fix up the sizes stored in the wav header */
    wav_hdr.size_8 += wav_hdr.data_size + (sizeof(wav_hdr) - 8);

    /* write the corrected header back; the audio file is in wav format */
    fseek(fp, 4, 0);
    fwrite(&wav_hdr.size_8, sizeof(wav_hdr.size_8), 1, fp);        // write size_8
    fseek(fp, 40, 0);                                              // seek to where data_size is stored
    fwrite(&wav_hdr.data_size, sizeof(wav_hdr.data_size), 1, fp);  // write data_size
    fclose(fp);
    fp = NULL;
    /* synthesis finished */
    ret = QTTSSessionEnd(sessionID, "Normal");
    if (MSP_SUCCESS != ret) {
        printf("QTTSSessionEnd failed, error code: %d.\n", ret);
    }

    return ret;
}

void voiceWordsCallback(const std_msgs::String::ConstPtr& msg)
{
    char cmd[2000];
    const char* text;
    int ret = MSP_SUCCESS;
    const char* session_begin_params =
        "voice_name = xiaoyan, text_encoding = utf8, sample_rate = 16000, "
        "speed = 50, volume = 50, pitch = 50, rdn = 2";
    const char* filename = "tts_sample.wav";  // name of the synthesized audio file

    std::cout << "I heard :" << msg->data.c_str() << std::endl;
    text = msg->data.c_str();

    /* text synthesis */
    printf("开始合成 ...\n");
    ret = text_to_speech(text, filename, session_begin_params);
    if (MSP_SUCCESS != ret) {
        printf("text_to_speech failed, error code: %d.\n", ret);
    }
    printf("合成完毕\n");

    unlink("/tmp/cmd");
    mkfifo("/tmp/cmd", 0777);
    popen("mplayer -quiet -slave -input file=/tmp/cmd 'tts_sample.wav'", "r");
    sleep(3);
}

void toExit()
{
    printf("按任意键退出 ...\n");
    getchar();
    MSPLogout();  // log out
}

int main(int argc, char* argv[])
{
    int ret = MSP_SUCCESS;
    const char* login_params = "appid = 594a7b46, work_dir = .";  // login parameters; the appid is bound to the msc library, do not change it casually
    /*
     * rdn:           digit pronunciation mode of the synthesized audio
     * volume:        volume of the synthesized audio
     * pitch:         pitch of the synthesized audio
     * speed:         speed of the synthesized audio
     * voice_name:    speaker used for synthesis
     * sample_rate:   sample rate of the synthesized audio
     * text_encoding: encoding of the input text
     *
     * for detailed parameter descriptions see the iFlytek Voice Cloud MSC API documentation
     */

    /* user login: username, password, login parameters; username and password can be obtained by registering at http://open.voicecloud.cn */
    ret = MSPLogin(NULL, NULL, login_params);
    if (MSP_SUCCESS != ret) {
        printf("MSPLogin failed, error code: %d.\n", ret);
        /*goto exit;*/  // login failed, log out
        toExit();
    }

    printf("\n###########################################################################\n");
    printf("## 语音合成(Text To Speech,TTS)技术能够自动将任意文字实时转换为连续的 ##\n");
    printf("## 自然语音,是一种能够在任何时间、任何地点,向任何人提供语音信息服务的 ##\n");
    printf("## 高效便捷手段,非常符合信息时代海量数据、动态更新和个性化查询的需求。 ##\n");
    printf("###########################################################################\n\n");

    ros::init(argc, argv, "TextToSpeech");
    ros::NodeHandle n;
    ros::Subscriber sub = n.subscribe("voiceWords", 1000, voiceWordsCallback);
    ros::spin();

exit:
    printf("按任意键退出 ...\n");
    getchar();
    MSPLogout();  // log out

    return 0;
}
- The main function declares a subscriber on the voiceWords topic to receive the input text string.
- The callback voiceWordsCallback uses the SDK interface to convert the string into Chinese speech.
Step 1: add the build rules to CMakeLists.txt
add_executable(tts_subscribe src/tts_subscribe.cpp)
target_link_libraries(tts_subscribe
  ${catkin_LIBRARIES}
  libmsc.so -ldl -pthread
)
Step 2: run the speech synthesis example
$ roscore
$ rosrun robot_voice tts_subscribe
$ rostopic pub /voiceWords std_msgs/String "data: '你好,我是机器人'"
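The node writes the synthesized speech to tts_sample.wav in the current working directory and plays it back with mplayer, so mplayer has to be installed; on Ubuntu, for example:

$ sudo apt-get install mplayer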
A possible error after launching speech synthesis
Cause: mplayer configuration.
Fix: add the following setting to the file $HOME/.mplayer/config: lirc=no
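For example, from the command line (creating the directory and file if they do not exist yet):

$ mkdir -p ~/.mplayer
$ echo "lirc=no" >> ~/.mplayer/config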
3.4 Intelligent Voice Assistant
voice_assistant.cpp
#include <iostream>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <time.h>

#include "robot_voice/qtts.h"
#include "robot_voice/msp_cmn.h"
#include "robot_voice/msp_errors.h"

#include "ros/ros.h"
#include "std_msgs/String.h"
#include <sstream>
#include <sys/types.h>
#include <sys/stat.h>

/* wav audio header format */
typedef struct _wave_pcm_hdr
{
    char        riff[4];            // = "RIFF"
    int         size_8;             // = FileSize - 8
    char        wave[4];            // = "WAVE"
    char        fmt[4];             // = "fmt "
    int         fmt_size;           // = size of the next structure: 16
    short int   format_tag;         // = PCM : 1
    short int   channels;           // = number of channels: 1
    int         samples_per_sec;    // = sample rate: 8000 | 6000 | 11025 | 16000
    int         avg_bytes_per_sec;  // = bytes per second: samples_per_sec * bits_per_sample / 8
    short int   block_align;        // = bytes per sample point: wBitsPerSample / 8
    short int   bits_per_sample;    // = quantization bits: 8 | 16
    char        data[4];            // = "data"
    int         data_size;          // = raw data length: FileSize - 44
} wave_pcm_hdr;

/* default wav audio header */
wave_pcm_hdr default_wav_hdr =
{
    { 'R', 'I', 'F', 'F' },
    0,
    { 'W', 'A', 'V', 'E' },
    { 'f', 'm', 't', ' ' },
    16,
    1,
    1,
    16000,
    32000,
    2,
    16,
    { 'd', 'a', 't', 'a' },
    0
};

/* text synthesis */
int text_to_speech(const char* src_text, const char* des_path, const char* params)
{
    int          ret          = -1;
    FILE*        fp           = NULL;
    const char*  sessionID    = NULL;
    unsigned int audio_len    = 0;
    wave_pcm_hdr wav_hdr      = default_wav_hdr;
    int          synth_status = MSP_TTS_FLAG_STILL_HAVE_DATA;

    if (NULL == src_text || NULL == des_path) {
        printf("params is error!\n");
        return ret;
    }
    fp = fopen(des_path, "wb");
    if (NULL == fp) {
        printf("open %s error.\n", des_path);
        return ret;
    }
    /* begin synthesis */
    sessionID = QTTSSessionBegin(params, &ret);
    if (MSP_SUCCESS != ret) {
        printf("QTTSSessionBegin failed, error code: %d.\n", ret);
        fclose(fp);
        return ret;
    }
    ret = QTTSTextPut(sessionID, src_text, (unsigned int)strlen(src_text), NULL);
    if (MSP_SUCCESS != ret) {
        printf("QTTSTextPut failed, error code: %d.\n", ret);
        QTTSSessionEnd(sessionID, "TextPutError");
        fclose(fp);
        return ret;
    }
    printf("正在合成 ...\n");
    fwrite(&wav_hdr, sizeof(wav_hdr), 1, fp);  // write the wav header, sample rate 16000
    while (1)
    {
        /* fetch the synthesized audio */
        const void* data = QTTSAudioGet(sessionID, &audio_len, &synth_status, &ret);
        if (MSP_SUCCESS != ret)
            break;
        if (NULL != data) {
            fwrite(data, audio_len, 1, fp);
            wav_hdr.data_size += audio_len;    // accumulate data_size
        }
        if (MSP_TTS_FLAG_DATA_END == synth_status)
            break;
        printf(">");
        usleep(150*1000);                      // avoid hogging the CPU
    } // for the possible values of synth_status see the iFlytek Voice Cloud API documentation
    printf("\n");
    if (MSP_SUCCESS != ret) {
        printf("QTTSAudioGet failed, error code: %d.\n", ret);
        QTTSSessionEnd(sessionID, "AudioGetError");
        fclose(fp);
        return ret;
    }
    /* fix up the sizes stored in the wav header */
    wav_hdr.size_8 += wav_hdr.data_size + (sizeof(wav_hdr) - 8);

    /* write the corrected header back; the audio file is in wav format */
    fseek(fp, 4, 0);
    fwrite(&wav_hdr.size_8, sizeof(wav_hdr.size_8), 1, fp);        // write size_8
    fseek(fp, 40, 0);                                              // seek to where data_size is stored
    fwrite(&wav_hdr.data_size, sizeof(wav_hdr.data_size), 1, fp);  // write data_size
    fclose(fp);
    fp = NULL;
    /* synthesis finished */
    ret = QTTSSessionEnd(sessionID, "Normal");
    if (MSP_SUCCESS != ret) {
        printf("QTTSSessionEnd failed, error code: %d.\n", ret);
    }

    return ret;
}

std::string to_string(int val)
{
    char buf[20];
    sprintf(buf, "%d", val);
    return std::string(buf);
}

void voiceWordsCallback(const std_msgs::String::ConstPtr& msg)
{
    char cmd[2000];
    const char* text;
    int ret = MSP_SUCCESS;
    const char* session_begin_params =
        "voice_name = xiaoyan, text_encoding = utf8, sample_rate = 16000, "
        "speed = 50, volume = 50, pitch = 50, rdn = 2";
    const char* filename = "tts_sample.wav";  // name of the synthesized audio file

    std::cout << "I heard :" << msg->data.c_str() << std::endl;

    std::string dataString = msg->data;
    if (dataString.compare("你是谁?") == 0) {
        char nameString[40] = "我是你的语音小助手";
        text = nameString;
        std::cout << text << std::endl;
    }
    else if (dataString.compare("你可以做什么?") == 0) {
        char helpString[40] = "你可以问我现在时间";
        text = helpString;
        std::cout << text << std::endl;
    }
    else if (dataString.compare("现在时间。") == 0) {
        // get the current time
        struct tm *ptm;
        time_t ts;
        ts = time(NULL);
        ptm = localtime(&ts);
        std::string string = "现在时间" + to_string(ptm->tm_hour) + "点"
                           + to_string(ptm->tm_min) + "分";
        char timeString[40] = {0};                            // zero-filled so the copied reply stays null-terminated
        string.copy(timeString, sizeof(timeString) - 1, 0);   // copy at most 39 bytes of the reply into the buffer
        text = timeString;
        std::cout << text << std::endl;
    }
    else {
        text = msg->data.c_str();
    }

    /* text synthesis */
    printf("开始合成 ...\n");
    ret = text_to_speech(text, filename, session_begin_params);
    if (MSP_SUCCESS != ret) {
        printf("text_to_speech failed, error code: %d.\n", ret);
    }
    printf("合成完毕\n");

    unlink("/tmp/cmd");
    mkfifo("/tmp/cmd", 0777);
    popen("mplayer -quiet -slave -input file=/tmp/cmd 'tts_sample.wav'", "r");
    sleep(3);
}

void toExit()
{
    printf("按任意键退出 ...\n");
    getchar();
    MSPLogout();  // log out
}

int main(int argc, char* argv[])
{
    int ret = MSP_SUCCESS;
    const char* login_params = "appid = 594a7b46, work_dir = .";  // login parameters; the appid is bound to the msc library, do not change it casually
    /*
     * rdn:           digit pronunciation mode of the synthesized audio
     * volume:        volume of the synthesized audio
     * pitch:         pitch of the synthesized audio
     * speed:         speed of the synthesized audio
     * voice_name:    speaker used for synthesis
     * sample_rate:   sample rate of the synthesized audio
     * text_encoding: encoding of the input text
     *
     * for detailed parameter descriptions see the iFlytek Voice Cloud MSC API documentation
     */

    /* user login: username, password, login parameters; username and password can be obtained by registering at http://open.voicecloud.cn */
    ret = MSPLogin(NULL, NULL, login_params);
    if (MSP_SUCCESS != ret) {
        printf("MSPLogin failed, error code: %d.\n", ret);
        /*goto exit;*/  // login failed, log out
        toExit();
    }

    printf("\n###########################################################################\n");
    printf("## 语音合成(Text To Speech,TTS)技术能够自动将任意文字实时转换为连续的 ##\n");
    printf("## 自然语音,是一种能够在任何时间、任何地点,向任何人提供语音信息服务的 ##\n");
    printf("## 高效便捷手段,非常符合信息时代海量数据、动态更新和个性化查询的需求。 ##\n");
    printf("###########################################################################\n\n");

    ros::init(argc, argv, "TextToSpeech");
    ros::NodeHandle n;
    ros::Subscriber sub = n.subscribe("voiceWords", 1000, voiceWordsCallback);
    ros::spin();

exit:
    printf("按任意键退出 ...\n");
    getchar();
    MSPLogout();  // log out

    return 0;
}
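To teach the assistant a new phrase, extend the if/else chain in voiceWordsCallback with another branch following the same pattern. A minimal sketch; the question and reply strings below are illustrative examples, not part of the original code:

    else if (dataString.compare("你叫什么名字?") == 0) {   // hypothetical extra keyword
        char replyString[40] = "我叫语音小助手";             // illustrative reply, synthesized like the others
        text = replyString;
        std::cout << text << std::endl;
    }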
Step 1: add the build rules to CMakeLists.txt
add_executable(voice_assistant src/voice_assistant.cpp)
target_link_libraries(voice_assistant
  ${catkin_LIBRARIES}
  libmsc.so -ldl -pthread
)
Step 2: run the voice assistant example
$ roscore
$ rosrun robot_voice iat_publish      # start the speech recognition node
$ rosrun robot_voice voice_assistant  # start the voice assistant that interprets the recognized text
$ rostopic pub /voiceWakeup std_msgs/String "data: 'any string'"   # wake-up signal
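After the wake-up message, ask one of the supported questions by voice (你是谁?, 你可以做什么?, 现在时间。). To test the assistant without a microphone, the recognized text can also be published by hand:

$ rostopic pub /voiceWords std_msgs/String "data: '你是谁?'"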
References
1. ROS探索总结(二十八)——机器听觉 (ROS Exploration Notes 28: Machine Hearing)
2. ROS Kinetic使用PocketSphinx进行语音识别 (Speech recognition with PocketSphinx on ROS Kinetic): https://blog.csdn.net/seeseeatre/article/details/79228816
3. 讯飞开放平台 (iFlytek Open Platform, an AI open platform centered on voice interaction): http://www.xfyun.cn/