{"id":1228,"date":"2021-10-19T17:06:18","date_gmt":"2021-10-19T09:06:18","guid":{"rendered":"https:\/\/aif.amtbbs.org\/?p=1228"},"modified":"2021-10-19T17:06:18","modified_gmt":"2021-10-19T09:06:18","slug":"%e8%bf%99725%e4%b8%aa%e6%9c%ba%e5%99%a8%e5%ad%a6%e4%b9%a0%e6%9c%af%e8%af%ad%e8%a1%a8%ef%bc%8c%e5%a4%aa%e5%85%a8%e4%ba%86%ef%bc%81","status":"publish","type":"post","link":"https:\/\/aif.amtbbs.org\/index.php\/2021\/10\/19\/1228\/","title":{"rendered":"\u8fd9725\u4e2a\u673a\u5668\u5b66\u4e60\u672f\u8bed\u8868\uff0c\u592a\u5168\u4e86\uff01"},"content":{"rendered":"<section data-tool=\"mdnice\u7f16\u8f91\u5668\" data-website=\"https:\/\/www.mdnice.com\" data-mpa-powered-by=\"yiban.io\">\n<p data-tool=\"mdnice\u7f16\u8f91\u5668\">\u4f60\u597d\uff0c\u6211\u662fzhenguo<\/p>\n<p data-tool=\"mdnice\u7f16\u8f91\u5668\">\u8fd9\u662f\u51e0\u4f4d\u673a\u5668\u5b66\u4e60\u6743\u5a01\u4e13\u5bb6\u6c47\u603b\u7684725\u4e2a\u673a\u5668\u5b66\u4e60\u672f\u8bed\u8868\uff0c\u975e\u5e38\u5168\u9762\u4e86\uff0c\u503c\u5f97\u6536\u85cf\uff01<\/p>\n<section data-tool=\"mdnice\u7f16\u8f91\u5668\">\n<table width=\"657\">\n<thead>\n<tr>\n<th><strong>\u82f1\u6587\u672f\u8bed<\/strong><\/th>\n<th><strong>\u4e2d\u6587\u7ffb\u8bd1<\/strong><\/th>\n<\/tr>\n<\/thead>\n<tbody>\n<tr>\n<td>0-1 Loss Function<\/td>\n<td>0-1\u635f\u5931\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Accept-Reject Sampling Method<\/td>\n<td>\u63a5\u53d7-\u62d2\u7edd\u62bd\u6837\u6cd5\/\u63a5\u53d7-\u62d2\u7edd\u91c7\u6837\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Accumulated Error Backpropagation<\/td>\n<td>\u7d2f\u79ef\u8bef\u5dee\u53cd\u5411\u4f20\u64ad<\/td>\n<\/tr>\n<tr>\n<td>Accuracy<\/td>\n<td>\u7cbe\u5ea6<\/td>\n<\/tr>\n<tr>\n<td>Acquisition Function<\/td>\n<td>\u91c7\u96c6\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Action<\/td>\n<td>\u52a8\u4f5c<\/td>\n<\/tr>\n<tr>\n<td>Activation Function<\/td>\n<td>\u6fc0\u6d3b\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Active Learning<\/td>\n<td>\u4e3b\u52a8\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Adaptive 
Bitrate Algorithm<\/td>\n<td>\u81ea\u9002\u5e94\u6bd4\u7279\u7387\u7b97\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Adaptive Boosting<\/td>\n<td>AdaBoost<\/td>\n<\/tr>\n<tr>\n<td>Adaptive Gradient Algorithm<\/td>\n<td>AdaGrad<\/td>\n<\/tr>\n<tr>\n<td>Adaptive Moment Estimation Algorithm<\/td>\n<td>Adam\u7b97\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Adaptive Resonance Theory<\/td>\n<td>\u81ea\u9002\u5e94\u8c10\u632f\u7406\u8bba<\/td>\n<\/tr>\n<tr>\n<td>Additive Model<\/td>\n<td>\u52a0\u6027\u6a21\u578b<\/td>\n<\/tr>\n<tr>\n<td>Affinity Matrix<\/td>\n<td>\u4eb2\u548c\u77e9\u9635<\/td>\n<\/tr>\n<tr>\n<td>Agent<\/td>\n<td>\u667a\u80fd\u4f53<\/td>\n<\/tr>\n<tr>\n<td>Algorithm<\/td>\n<td>\u7b97\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Alpha-Beta Pruning<\/td>\n<td>\u03b1-\u03b2\u4fee\u526a\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Anomaly Detection<\/td>\n<td>\u5f02\u5e38\u68c0\u6d4b<\/td>\n<\/tr>\n<tr>\n<td>Approximate Inference<\/td>\n<td>\u8fd1\u4f3c\u63a8\u65ad<\/td>\n<\/tr>\n<tr>\n<td>Area Under ROC Curve<\/td>\n<td>AUC<\/td>\n<\/tr>\n<tr>\n<td>Artificial Intelligence<\/td>\n<td>\u4eba\u5de5\u667a\u80fd<\/td>\n<\/tr>\n<tr>\n<td>Artificial Neural Network<\/td>\n<td>\u4eba\u5de5\u795e\u7ecf\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Artificial Neuron<\/td>\n<td>\u4eba\u5de5\u795e\u7ecf\u5143<\/td>\n<\/tr>\n<tr>\n<td>Attention<\/td>\n<td>\u6ce8\u610f\u529b<\/td>\n<\/tr>\n<tr>\n<td>Attention Mechanism<\/td>\n<td>\u6ce8\u610f\u529b\u673a\u5236<\/td>\n<\/tr>\n<tr>\n<td>Attribute<\/td>\n<td>\u5c5e\u6027<\/td>\n<\/tr>\n<tr>\n<td>Attribute Space<\/td>\n<td>\u5c5e\u6027\u7a7a\u95f4<\/td>\n<\/tr>\n<tr>\n<td>Autoencoder<\/td>\n<td>\u81ea\u7f16\u7801\u5668<\/td>\n<\/tr>\n<tr>\n<td>Automatic Differentiation<\/td>\n<td>\u81ea\u52a8\u5fae\u5206<\/td>\n<\/tr>\n<tr>\n<td>Autoregressive Model<\/td>\n<td>\u81ea\u56de\u5f52\u6a21\u578b<\/td>\n<\/tr>\n<tr>\n<td>Back Propagation<\/td>\n<td>\u53cd\u5411\u4f20\u64ad<\/td>\n<\/tr>\n<tr>\n<td>Back Propagation 
Algorithm<\/td>\n<td>\u53cd\u5411\u4f20\u64ad\u7b97\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Back Propagation Through Time<\/td>\n<td>\u968f\u65f6\u95f4\u53cd\u5411\u4f20\u64ad<\/td>\n<\/tr>\n<tr>\n<td>Backward Induction<\/td>\n<td>\u53cd\u5411\u5f52\u7eb3<\/td>\n<\/tr>\n<tr>\n<td>Backward Search<\/td>\n<td>\u53cd\u5411\u641c\u7d22<\/td>\n<\/tr>\n<tr>\n<td>Bag of Words<\/td>\n<td>\u8bcd\u888b<\/td>\n<\/tr>\n<tr>\n<td>Bandit<\/td>\n<td>\u8d4c\u535a\u673a\/\u8001\u864e\u673a<\/td>\n<\/tr>\n<tr>\n<td>Base Learner<\/td>\n<td>\u57fa\u5b66\u4e60\u5668<\/td>\n<\/tr>\n<tr>\n<td>Base Learning Algorithm<\/td>\n<td>\u57fa\u5b66\u4e60\u7b97\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Baseline<\/td>\n<td>\u57fa\u51c6<\/td>\n<\/tr>\n<tr>\n<td>Batch<\/td>\n<td>\u6279\u91cf<\/td>\n<\/tr>\n<tr>\n<td>Batch Normalization<\/td>\n<td>\u6279\u91cf\u89c4\u8303\u5316<\/td>\n<\/tr>\n<tr>\n<td>Bayes Decision Rule<\/td>\n<td>\u8d1d\u53f6\u65af\u51b3\u7b56\u51c6\u5219<\/td>\n<\/tr>\n<tr>\n<td>Bayes Model Averaging<\/td>\n<td>\u8d1d\u53f6\u65af\u6a21\u578b\u5e73\u5747<\/td>\n<\/tr>\n<tr>\n<td>Bayes Optimal Classifier<\/td>\n<td>\u8d1d\u53f6\u65af\u6700\u4f18\u5206\u7c7b\u5668<\/td>\n<\/tr>\n<tr>\n<td>Bayes&#8217; Theorem<\/td>\n<td>\u8d1d\u53f6\u65af\u5b9a\u7406<\/td>\n<\/tr>\n<tr>\n<td>Bayesian Decision Theory<\/td>\n<td>\u8d1d\u53f6\u65af\u51b3\u7b56\u7406\u8bba<\/td>\n<\/tr>\n<tr>\n<td>Bayesian Inference<\/td>\n<td>\u8d1d\u53f6\u65af\u63a8\u65ad<\/td>\n<\/tr>\n<tr>\n<td>Bayesian Learning<\/td>\n<td>\u8d1d\u53f6\u65af\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Bayesian Network<\/td>\n<td>\u8d1d\u53f6\u65af\u7f51\/\u8d1d\u53f6\u65af\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Bayesian Optimization<\/td>\n<td>\u8d1d\u53f6\u65af\u4f18\u5316<\/td>\n<\/tr>\n<tr>\n<td>Beam Search<\/td>\n<td>\u675f\u641c\u7d22<\/td>\n<\/tr>\n<tr>\n<td>Benchmark<\/td>\n<td>\u57fa\u51c6<\/td>\n<\/tr>\n<tr>\n<td>Belief Network<\/td>\n<td>\u4fe1\u5ff5\u7f51\/\u4fe1\u5ff5\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Belief 
Propagation<\/td>\n<td>\u4fe1\u5ff5\u4f20\u64ad<\/td>\n<\/tr>\n<tr>\n<td>Bellman Equation<\/td>\n<td>\u8d1d\u5c14\u66fc\u65b9\u7a0b<\/td>\n<\/tr>\n<tr>\n<td>Bernoulli Distribution<\/td>\n<td>\u4f2f\u52aa\u5229\u5206\u5e03<\/td>\n<\/tr>\n<tr>\n<td>Beta Distribution<\/td>\n<td>\u8d1d\u5854\u5206\u5e03<\/td>\n<\/tr>\n<tr>\n<td>Between-Class Scatter Matrix<\/td>\n<td>\u7c7b\u95f4\u6563\u5ea6\u77e9\u9635<\/td>\n<\/tr>\n<tr>\n<td>BFGS<\/td>\n<td>BFGS<\/td>\n<\/tr>\n<tr>\n<td>Bias<\/td>\n<td>\u504f\u5dee\/\u504f\u7f6e<\/td>\n<\/tr>\n<tr>\n<td>Bias In Affine Function<\/td>\n<td>\u504f\u7f6e<\/td>\n<\/tr>\n<tr>\n<td>Bias In Statistics<\/td>\n<td>\u504f\u5dee<\/td>\n<\/tr>\n<tr>\n<td>Bias Shift<\/td>\n<td>\u504f\u7f6e\u504f\u79fb<\/td>\n<\/tr>\n<tr>\n<td>Bias-Variance Decomposition<\/td>\n<td>\u504f\u5dee &#8211; \u65b9\u5dee\u5206\u89e3<\/td>\n<\/tr>\n<tr>\n<td>Bias-Variance Dilemma<\/td>\n<td>\u504f\u5dee &#8211; \u65b9\u5dee\u56f0\u5883<\/td>\n<\/tr>\n<tr>\n<td>Bidirectional Recurrent Neural Network<\/td>\n<td>\u53cc\u5411\u5faa\u73af\u795e\u7ecf\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Bigram<\/td>\n<td>\u4e8c\u5143\u8bed\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Bilingual Evaluation Understudy<\/td>\n<td>BLEU<\/td>\n<\/tr>\n<tr>\n<td>Binary Classification<\/td>\n<td>\u4e8c\u5206\u7c7b<\/td>\n<\/tr>\n<tr>\n<td>Binomial Distribution<\/td>\n<td>\u4e8c\u9879\u5206\u5e03<\/td>\n<\/tr>\n<tr>\n<td>Binomial Test<\/td>\n<td>\u4e8c\u9879\u68c0\u9a8c<\/td>\n<\/tr>\n<tr>\n<td>Boltzmann Distribution<\/td>\n<td>\u73bb\u5c14\u5179\u66fc\u5206\u5e03<\/td>\n<\/tr>\n<tr>\n<td>Boltzmann Machine<\/td>\n<td>\u73bb\u5c14\u5179\u66fc\u673a<\/td>\n<\/tr>\n<tr>\n<td>Boosting<\/td>\n<td>Boosting<\/td>\n<\/tr>\n<tr>\n<td>Bootstrap Aggregating<\/td>\n<td>Bagging<\/td>\n<\/tr>\n<tr>\n<td>Bootstrap Sampling<\/td>\n<td>\u81ea\u52a9\u91c7\u6837\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Bootstrapping<\/td>\n<td>\u81ea\u52a9\u6cd5\/\u81ea\u4e3e\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Break-Event 
Point<\/td>\n<td>\u5e73\u8861\u70b9<\/td>\n<\/tr>\n<tr>\n<td>Bucketing<\/td>\n<td>\u5206\u6876<\/td>\n<\/tr>\n<tr>\n<td>Calculus of Variations<\/td>\n<td>\u53d8\u5206\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Cascade-Correlation<\/td>\n<td>\u7ea7\u8054\u76f8\u5173<\/td>\n<\/tr>\n<tr>\n<td>Catastrophic Forgetting<\/td>\n<td>\u707e\u96be\u6027\u9057\u5fd8<\/td>\n<\/tr>\n<tr>\n<td>Categorical Distribution<\/td>\n<td>\u7c7b\u522b\u5206\u5e03<\/td>\n<\/tr>\n<tr>\n<td>Cell<\/td>\n<td>\u5355\u5143<\/td>\n<\/tr>\n<tr>\n<td>Chain Rule<\/td>\n<td>\u94fe\u5f0f\u6cd5\u5219<\/td>\n<\/tr>\n<tr>\n<td>Chebyshev Distance<\/td>\n<td>\u5207\u6bd4\u96ea\u592b\u8ddd\u79bb<\/td>\n<\/tr>\n<tr>\n<td>Class<\/td>\n<td>\u7c7b\u522b<\/td>\n<\/tr>\n<tr>\n<td>Class-Imbalance<\/td>\n<td>\u7c7b\u522b\u4e0d\u5e73\u8861<\/td>\n<\/tr>\n<tr>\n<td>Classification<\/td>\n<td>\u5206\u7c7b<\/td>\n<\/tr>\n<tr>\n<td>Classification And Regression Tree<\/td>\n<td>\u5206\u7c7b\u4e0e\u56de\u5f52\u6811<\/td>\n<\/tr>\n<tr>\n<td>Classifier<\/td>\n<td>\u5206\u7c7b\u5668<\/td>\n<\/tr>\n<tr>\n<td>Clique<\/td>\n<td>\u56e2<\/td>\n<\/tr>\n<tr>\n<td>Cluster<\/td>\n<td>\u7c07<\/td>\n<\/tr>\n<tr>\n<td>Cluster Assumption<\/td>\n<td>\u805a\u7c7b\u5047\u8bbe<\/td>\n<\/tr>\n<tr>\n<td>Clustering<\/td>\n<td>\u805a\u7c7b<\/td>\n<\/tr>\n<tr>\n<td>Clustering Ensemble<\/td>\n<td>\u805a\u7c7b\u96c6\u6210<\/td>\n<\/tr>\n<tr>\n<td>Co-Training<\/td>\n<td>\u534f\u540c\u8bad\u7ec3<\/td>\n<\/tr>\n<tr>\n<td>Coding Matrix<\/td>\n<td>\u7f16\u7801\u77e9\u9635<\/td>\n<\/tr>\n<tr>\n<td>Collaborative Filtering<\/td>\n<td>\u534f\u540c\u8fc7\u6ee4<\/td>\n<\/tr>\n<tr>\n<td>Competitive Learning<\/td>\n<td>\u7ade\u4e89\u578b\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Comprehensibility<\/td>\n<td>\u53ef\u89e3\u91ca\u6027<\/td>\n<\/tr>\n<tr>\n<td>Computation Graph<\/td>\n<td>\u8ba1\u7b97\u56fe<\/td>\n<\/tr>\n<tr>\n<td>Computational Learning Theory<\/td>\n<td>\u8ba1\u7b97\u5b66\u4e60\u7406\u8bba<\/td>\n<\/tr>\n<tr>\n<td>Conditional 
Entropy<\/td>\n<td>\u6761\u4ef6\u71b5<\/td>\n<\/tr>\n<tr>\n<td>Conditional Probability<\/td>\n<td>\u6761\u4ef6\u6982\u7387<\/td>\n<\/tr>\n<tr>\n<td>Conditional Probability Distribution<\/td>\n<td>\u6761\u4ef6\u6982\u7387\u5206\u5e03<\/td>\n<\/tr>\n<tr>\n<td>Conditional Random Field<\/td>\n<td>\u6761\u4ef6\u968f\u673a\u573a<\/td>\n<\/tr>\n<tr>\n<td>Conditional Risk<\/td>\n<td>\u6761\u4ef6\u98ce\u9669<\/td>\n<\/tr>\n<tr>\n<td>Confidence<\/td>\n<td>\u7f6e\u4fe1\u5ea6<\/td>\n<\/tr>\n<tr>\n<td>Confusion Matrix<\/td>\n<td>\u6df7\u6dc6\u77e9\u9635<\/td>\n<\/tr>\n<tr>\n<td>Conjugate Distribution<\/td>\n<td>\u5171\u8f6d\u5206\u5e03<\/td>\n<\/tr>\n<tr>\n<td>Connection Weight<\/td>\n<td>\u8fde\u63a5\u6743<\/td>\n<\/tr>\n<tr>\n<td>Connectionism<\/td>\n<td>\u8fde\u63a5\u4e3b\u4e49<\/td>\n<\/tr>\n<tr>\n<td>Consistency<\/td>\n<td>\u4e00\u81f4\u6027<\/td>\n<\/tr>\n<tr>\n<td>Constrained Optimization<\/td>\n<td>\u7ea6\u675f\u4f18\u5316<\/td>\n<\/tr>\n<tr>\n<td>Context Variable<\/td>\n<td>\u4e0a\u4e0b\u6587\u53d8\u91cf<\/td>\n<\/tr>\n<tr>\n<td>Context Vector<\/td>\n<td>\u4e0a\u4e0b\u6587\u5411\u91cf<\/td>\n<\/tr>\n<tr>\n<td>Context Window<\/td>\n<td>\u4e0a\u4e0b\u6587\u7a97\u53e3<\/td>\n<\/tr>\n<tr>\n<td>Context Word<\/td>\n<td>\u4e0a\u4e0b\u6587\u8bcd<\/td>\n<\/tr>\n<tr>\n<td>Contextual Bandit<\/td>\n<td>\u4e0a\u4e0b\u6587\u8d4c\u535a\u673a\/\u4e0a\u4e0b\u6587\u8001\u864e\u673a<\/td>\n<\/tr>\n<tr>\n<td>Contingency Table<\/td>\n<td>\u5217\u8054\u8868<\/td>\n<\/tr>\n<tr>\n<td>Continuous Attribute<\/td>\n<td>\u8fde\u7eed\u5c5e\u6027<\/td>\n<\/tr>\n<tr>\n<td>Contrastive Divergence<\/td>\n<td>\u5bf9\u6bd4\u6563\u5ea6<\/td>\n<\/tr>\n<tr>\n<td>Convergence<\/td>\n<td>\u6536\u655b<\/td>\n<\/tr>\n<tr>\n<td>Convex Optimization<\/td>\n<td>\u51f8\u4f18\u5316<\/td>\n<\/tr>\n<tr>\n<td>Convex Quadratic Programming<\/td>\n<td>\u51f8\u4e8c\u6b21\u89c4\u5212<\/td>\n<\/tr>\n<tr>\n<td>Convolution<\/td>\n<td>\u5377\u79ef<\/td>\n<\/tr>\n<tr>\n<td>Convolutional 
Kernel<\/td>\n<td>\u5377\u79ef\u6838<\/td>\n<\/tr>\n<tr>\n<td>Convolutional Neural Network<\/td>\n<td>\u5377\u79ef\u795e\u7ecf\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Coordinate Descent<\/td>\n<td>\u5750\u6807\u4e0b\u964d<\/td>\n<\/tr>\n<tr>\n<td>Corpus<\/td>\n<td>\u8bed\u6599\u5e93<\/td>\n<\/tr>\n<tr>\n<td>Correlation Coefficient<\/td>\n<td>\u76f8\u5173\u7cfb\u6570<\/td>\n<\/tr>\n<tr>\n<td>Cosine Similarity<\/td>\n<td>\u4f59\u5f26\u76f8\u4f3c\u5ea6<\/td>\n<\/tr>\n<tr>\n<td>Cost<\/td>\n<td>\u4ee3\u4ef7<\/td>\n<\/tr>\n<tr>\n<td>Cost Curve<\/td>\n<td>\u4ee3\u4ef7\u66f2\u7ebf<\/td>\n<\/tr>\n<tr>\n<td>Cost Function<\/td>\n<td>\u4ee3\u4ef7\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Cost Matrix<\/td>\n<td>\u4ee3\u4ef7\u77e9\u9635<\/td>\n<\/tr>\n<tr>\n<td>Cost-Sensitive<\/td>\n<td>\u4ee3\u4ef7\u654f\u611f<\/td>\n<\/tr>\n<tr>\n<td>Covariance<\/td>\n<td>\u534f\u65b9\u5dee<\/td>\n<\/tr>\n<tr>\n<td>Covariance Matrix<\/td>\n<td>\u534f\u65b9\u5dee\u77e9\u9635<\/td>\n<\/tr>\n<tr>\n<td>Critical Point<\/td>\n<td>\u4e34\u754c\u70b9<\/td>\n<\/tr>\n<tr>\n<td>Cross Entropy<\/td>\n<td>\u4ea4\u53c9\u71b5<\/td>\n<\/tr>\n<tr>\n<td>Cross Validation<\/td>\n<td>\u4ea4\u53c9\u9a8c\u8bc1<\/td>\n<\/tr>\n<tr>\n<td>Curse of Dimensionality<\/td>\n<td>\u7ef4\u6570\u707e\u96be<\/td>\n<\/tr>\n<tr>\n<td>Cutting Plane Algorithm<\/td>\n<td>\u5272\u5e73\u9762\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Data Mining<\/td>\n<td>\u6570\u636e\u6316\u6398<\/td>\n<\/tr>\n<tr>\n<td>Data Set<\/td>\n<td>\u6570\u636e\u96c6<\/td>\n<\/tr>\n<tr>\n<td>Davidon-Fletcher-Powell<\/td>\n<td>DFP<\/td>\n<\/tr>\n<tr>\n<td>Decision Boundary<\/td>\n<td>\u51b3\u7b56\u8fb9\u754c<\/td>\n<\/tr>\n<tr>\n<td>Decision Function<\/td>\n<td>\u51b3\u7b56\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Decision Stump<\/td>\n<td>\u51b3\u7b56\u6811\u6869<\/td>\n<\/tr>\n<tr>\n<td>Decision 
Tree<\/td>\n<td>\u51b3\u7b56\u6811<\/td>\n<\/tr>\n<tr>\n<td>Decoder<\/td>\n<td>\u89e3\u7801\u5668<\/td>\n<\/tr>\n<tr>\n<td>Decoding<\/td>\n<td>\u89e3\u7801<\/td>\n<\/tr>\n<tr>\n<td>Deconvolution<\/td>\n<td>\u53cd\u5377\u79ef<\/td>\n<\/tr>\n<tr>\n<td>Deconvolutional Network<\/td>\n<td>\u53cd\u5377\u79ef\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Deduction<\/td>\n<td>\u6f14\u7ece<\/td>\n<\/tr>\n<tr>\n<td>Deep Belief Network<\/td>\n<td>\u6df1\u5ea6\u4fe1\u5ff5\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Deep Boltzmann Machine<\/td>\n<td>\u6df1\u5ea6\u73bb\u5c14\u5179\u66fc\u673a<\/td>\n<\/tr>\n<tr>\n<td>Deep Convolutional Generative Adversarial Network<\/td>\n<td>\u6df1\u5ea6\u5377\u79ef\u751f\u6210\u5bf9\u6297\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Deep Learning<\/td>\n<td>\u6df1\u5ea6\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Deep Neural Network<\/td>\n<td>\u6df1\u5ea6\u795e\u7ecf\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Deep Q-Network<\/td>\n<td>\u6df1\u5ea6Q\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Delta-Bar-Delta<\/td>\n<td>Delta-Bar-Delta<\/td>\n<\/tr>\n<tr>\n<td>Denoising<\/td>\n<td>\u53bb\u566a<\/td>\n<\/tr>\n<tr>\n<td>Denoising Autoencoder<\/td>\n<td>\u53bb\u566a\u81ea\u7f16\u7801\u5668<\/td>\n<\/tr>\n<tr>\n<td>Denoising Score Matching<\/td>\n<td>\u53bb\u566a\u5206\u6570\u5339\u914d<\/td>\n<\/tr>\n<tr>\n<td>Density Estimation<\/td>\n<td>\u5bc6\u5ea6\u4f30\u8ba1<\/td>\n<\/tr>\n<tr>\n<td>Density-Based Clustering<\/td>\n<td>\u5bc6\u5ea6\u805a\u7c7b<\/td>\n<\/tr>\n<tr>\n<td>Derivative<\/td>\n<td>\u5bfc\u6570<\/td>\n<\/tr>\n<tr>\n<td>Determinant<\/td>\n<td>\u884c\u5217\u5f0f<\/td>\n<\/tr>\n<tr>\n<td>Diagonal Matrix<\/td>\n<td>\u5bf9\u89d2\u77e9\u9635<\/td>\n<\/tr>\n<tr>\n<td>Dictionary Learning<\/td>\n<td>\u5b57\u5178\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Dimension Reduction<\/td>\n<td>\u964d\u7ef4<\/td>\n<\/tr>\n<tr>\n<td>Directed Edge<\/td>\n<td>\u6709\u5411\u8fb9<\/td>\n<\/tr>\n<tr>\n<td>Directed Graphical Model<\/td>\n<td>\u6709\u5411\u56fe\u6a21\u578b<\/td>\n<\/tr>\n<tr>\n<td>Directed 
Separation<\/td>\n<td>\u6709\u5411\u5206\u79bb<\/td>\n<\/tr>\n<tr>\n<td>Dirichlet Distribution<\/td>\n<td>\u72c4\u5229\u514b\u96f7\u5206\u5e03<\/td>\n<\/tr>\n<tr>\n<td>Discriminative Model<\/td>\n<td>\u5224\u522b\u5f0f\u6a21\u578b<\/td>\n<\/tr>\n<tr>\n<td>Discriminator<\/td>\n<td>\u5224\u522b\u5668<\/td>\n<\/tr>\n<tr>\n<td>Discriminator Network<\/td>\n<td>\u5224\u522b\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Distance Measure<\/td>\n<td>\u8ddd\u79bb\u5ea6\u91cf<\/td>\n<\/tr>\n<tr>\n<td>Distance Metric Learning<\/td>\n<td>\u8ddd\u79bb\u5ea6\u91cf\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Distributed Representation<\/td>\n<td>\u5206\u5e03\u5f0f\u8868\u793a<\/td>\n<\/tr>\n<tr>\n<td>Diverge<\/td>\n<td>\u53d1\u6563<\/td>\n<\/tr>\n<tr>\n<td>Divergence<\/td>\n<td>\u6563\u5ea6<\/td>\n<\/tr>\n<tr>\n<td>Diversity<\/td>\n<td>\u591a\u6837\u6027<\/td>\n<\/tr>\n<tr>\n<td>Diversity Measure<\/td>\n<td>\u591a\u6837\u6027\u5ea6\u91cf\/\u5dee\u5f02\u6027\u5ea6\u91cf<\/td>\n<\/tr>\n<tr>\n<td>Domain Adaptation<\/td>\n<td>\u9886\u57df\u81ea\u9002\u5e94<\/td>\n<\/tr>\n<tr>\n<td>Dominant Strategy<\/td>\n<td>\u4e3b\u7279\u5f81\u503c<\/td>\n<\/tr>\n<tr>\n<td>Dominant Strategy<\/td>\n<td>\u5360\u4f18\u7b56\u7565<\/td>\n<\/tr>\n<tr>\n<td>Down Sampling<\/td>\n<td>\u4e0b\u91c7\u6837<\/td>\n<\/tr>\n<tr>\n<td>Dropout<\/td>\n<td>\u6682\u9000\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Dropout Boosting<\/td>\n<td>\u6682\u9000Boosting<\/td>\n<\/tr>\n<tr>\n<td>Dropout Method<\/td>\n<td>\u6682\u9000\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Dual Problem<\/td>\n<td>\u5bf9\u5076\u95ee\u9898<\/td>\n<\/tr>\n<tr>\n<td>Dummy Node<\/td>\n<td>\u54d1\u7ed3\u70b9<\/td>\n<\/tr>\n<tr>\n<td>Dynamic Bayesian Network<\/td>\n<td>\u52a8\u6001\u8d1d\u53f6\u65af\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Dynamic Programming<\/td>\n<td>\u52a8\u6001\u89c4\u5212<\/td>\n<\/tr>\n<tr>\n<td>Early 
Stopping<\/td>\n<td>\u65e9\u505c<\/td>\n<\/tr>\n<tr>\n<td>Eigendecomposition<\/td>\n<td>\u7279\u5f81\u5206\u89e3<\/td>\n<\/tr>\n<tr>\n<td>Eigenvalue<\/td>\n<td>\u7279\u5f81\u503c<\/td>\n<\/tr>\n<tr>\n<td>Element-Wise Product<\/td>\n<td>\u9010\u5143\u7d20\u79ef<\/td>\n<\/tr>\n<tr>\n<td>Embedding<\/td>\n<td>\u5d4c\u5165<\/td>\n<\/tr>\n<tr>\n<td>Empirical Conditional Entropy<\/td>\n<td>\u7ecf\u9a8c\u6761\u4ef6\u71b5<\/td>\n<\/tr>\n<tr>\n<td>Empirical Distribution<\/td>\n<td>\u7ecf\u9a8c\u5206\u5e03<\/td>\n<\/tr>\n<tr>\n<td>Empirical Entropy<\/td>\n<td>\u7ecf\u9a8c\u71b5<\/td>\n<\/tr>\n<tr>\n<td>Empirical Error<\/td>\n<td>\u7ecf\u9a8c\u8bef\u5dee<\/td>\n<\/tr>\n<tr>\n<td>Empirical Risk<\/td>\n<td>\u7ecf\u9a8c\u98ce\u9669<\/td>\n<\/tr>\n<tr>\n<td>Empirical Risk Minimization<\/td>\n<td>\u7ecf\u9a8c\u98ce\u9669\u6700\u5c0f\u5316<\/td>\n<\/tr>\n<tr>\n<td>Encoder<\/td>\n<td>\u7f16\u7801\u5668<\/td>\n<\/tr>\n<tr>\n<td>Encoding<\/td>\n<td>\u7f16\u7801<\/td>\n<\/tr>\n<tr>\n<td>End-To-End<\/td>\n<td>\u7aef\u5230\u7aef<\/td>\n<\/tr>\n<tr>\n<td>Energy Function<\/td>\n<td>\u80fd\u91cf\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Energy-Based Model<\/td>\n<td>\u57fa\u4e8e\u80fd\u91cf\u7684\u6a21\u578b<\/td>\n<\/tr>\n<tr>\n<td>Ensemble Learning<\/td>\n<td>\u96c6\u6210\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Ensemble Pruning<\/td>\n<td>\u96c6\u6210\u4fee\u526a<\/td>\n<\/tr>\n<tr>\n<td>Entropy<\/td>\n<td>\u71b5<\/td>\n<\/tr>\n<tr>\n<td>Episode<\/td>\n<td>\u56de\u5408<\/td>\n<\/tr>\n<tr>\n<td>Epoch<\/td>\n<td>\u8f6e<\/td>\n<\/tr>\n<tr>\n<td>Error<\/td>\n<td>\u8bef\u5dee<\/td>\n<\/tr>\n<tr>\n<td>Error Backpropagation Algorithm<\/td>\n<td>\u8bef\u5dee\u53cd\u5411\u4f20\u64ad\u7b97\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Error Backpropagation<\/td>\n<td>\u8bef\u5dee\u53cd\u5411\u4f20\u64ad<\/td>\n<\/tr>\n<tr>\n<td>Error Correcting Output Codes<\/td>\n<td>\u7ea0\u9519\u8f93\u51fa\u7f16\u7801<\/td>\n<\/tr>\n<tr>\n<td>Error Rate<\/td>\n<td>\u9519\u8bef\u7387<\/td>\n<\/tr>\n<tr>\n<td>Error-Ambiguity 
Decomposition<\/td>\n<td>\u8bef\u5dee\uff0d\u5206\u6b67\u5206\u89e3<\/td>\n<\/tr>\n<tr>\n<td>Estimator<\/td>\n<td>\u4f30\u8ba1\/\u4f30\u8ba1\u91cf<\/td>\n<\/tr>\n<tr>\n<td>Euclidean Distance<\/td>\n<td>\u6b27\u6c0f\u8ddd\u79bb<\/td>\n<\/tr>\n<tr>\n<td>Evidence<\/td>\n<td>\u8bc1\u636e<\/td>\n<\/tr>\n<tr>\n<td>Evidence Lower Bound<\/td>\n<td>\u8bc1\u636e\u4e0b\u754c<\/td>\n<\/tr>\n<tr>\n<td>Exact Inference<\/td>\n<td>\u7cbe\u786e\u63a8\u65ad<\/td>\n<\/tr>\n<tr>\n<td>Example<\/td>\n<td>\u6837\u4f8b<\/td>\n<\/tr>\n<tr>\n<td>Expectation<\/td>\n<td>\u671f\u671b<\/td>\n<\/tr>\n<tr>\n<td>Expectation Maximization<\/td>\n<td>\u671f\u671b\u6700\u5927\u5316<\/td>\n<\/tr>\n<tr>\n<td>Expected Loss<\/td>\n<td>\u671f\u671b\u635f\u5931<\/td>\n<\/tr>\n<tr>\n<td>Expert System<\/td>\n<td>\u4e13\u5bb6\u7cfb\u7edf<\/td>\n<\/tr>\n<tr>\n<td>Exploding Gradient<\/td>\n<td>\u68af\u5ea6\u7206\u70b8<\/td>\n<\/tr>\n<tr>\n<td>Exponential Loss Function<\/td>\n<td>\u6307\u6570\u635f\u5931\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Factor<\/td>\n<td>\u56e0\u5b50<\/td>\n<\/tr>\n<tr>\n<td>Factorization<\/td>\n<td>\u56e0\u5b50\u5206\u89e3<\/td>\n<\/tr>\n<tr>\n<td>Feature<\/td>\n<td>\u7279\u5f81<\/td>\n<\/tr>\n<tr>\n<td>Feature Engineering<\/td>\n<td>\u7279\u5f81\u5de5\u7a0b<\/td>\n<\/tr>\n<tr>\n<td>Feature Map<\/td>\n<td>\u7279\u5f81\u56fe<\/td>\n<\/tr>\n<tr>\n<td>Feature Selection<\/td>\n<td>\u7279\u5f81\u9009\u62e9<\/td>\n<\/tr>\n<tr>\n<td>Feature Vector<\/td>\n<td>\u7279\u5f81\u5411\u91cf<\/td>\n<\/tr>\n<tr>\n<td>Feature Learning<\/td>\n<td>\u7279\u5f81\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Feedforward<\/td>\n<td>\u524d\u9988<\/td>\n<\/tr>\n<tr>\n<td>Feedforward Neural Network<\/td>\n<td>\u524d\u9988\u795e\u7ecf\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Few-Shot 
Learning<\/td>\n<td>\u5c11\u8bd5\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Filter<\/td>\n<td>\u6ee4\u6ce2\u5668<\/td>\n<\/tr>\n<tr>\n<td>Fine-Tuning<\/td>\n<td>\u5fae\u8c03<\/td>\n<\/tr>\n<tr>\n<td>Fluctuation<\/td>\n<td>\u632f\u8361<\/td>\n<\/tr>\n<tr>\n<td>Forget Gate<\/td>\n<td>\u9057\u5fd8\u95e8<\/td>\n<\/tr>\n<tr>\n<td>Forward Propagation<\/td>\n<td>\u524d\u5411\u4f20\u64ad\/\u6b63\u5411\u4f20\u64ad<\/td>\n<\/tr>\n<tr>\n<td>Forward Stagewise Algorithm<\/td>\n<td>\u524d\u5411\u5206\u6b65\u7b97\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Fractionally Strided Convolution<\/td>\n<td>\u5fae\u6b65\u5377\u79ef<\/td>\n<\/tr>\n<tr>\n<td>Frobenius Norm<\/td>\n<td>Frobenius \u8303\u6570<\/td>\n<\/tr>\n<tr>\n<td>Full Padding<\/td>\n<td>\u5168\u586b\u5145<\/td>\n<\/tr>\n<tr>\n<td>Functional<\/td>\n<td>\u6cdb\u51fd<\/td>\n<\/tr>\n<tr>\n<td>Functional Neuron<\/td>\n<td>\u529f\u80fd\u795e\u7ecf\u5143<\/td>\n<\/tr>\n<tr>\n<td>Gated Recurrent Unit<\/td>\n<td>\u95e8\u63a7\u5faa\u73af\u5355\u5143<\/td>\n<\/tr>\n<tr>\n<td>Gated RNN<\/td>\n<td>\u95e8\u63a7RNN<\/td>\n<\/tr>\n<tr>\n<td>Gaussian Distribution<\/td>\n<td>\u9ad8\u65af\u5206\u5e03<\/td>\n<\/tr>\n<tr>\n<td>Gaussian Kernel<\/td>\n<td>\u9ad8\u65af\u6838<\/td>\n<\/tr>\n<tr>\n<td>Gaussian Kernel Function<\/td>\n<td>\u9ad8\u65af\u6838\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Gaussian Mixture Model<\/td>\n<td>\u9ad8\u65af\u6df7\u5408\u6a21\u578b<\/td>\n<\/tr>\n<tr>\n<td>Gaussian Process<\/td>\n<td>\u9ad8\u65af\u8fc7\u7a0b<\/td>\n<\/tr>\n<tr>\n<td>Generalization Ability<\/td>\n<td>\u6cdb\u5316\u80fd\u529b<\/td>\n<\/tr>\n<tr>\n<td>Generalization Error<\/td>\n<td>\u6cdb\u5316\u8bef\u5dee<\/td>\n<\/tr>\n<tr>\n<td>Generalization Error Bound<\/td>\n<td>\u6cdb\u5316\u8bef\u5dee\u4e0a\u754c<\/td>\n<\/tr>\n<tr>\n<td>Generalize<\/td>\n<td>\u6cdb\u5316<\/td>\n<\/tr>\n<tr>\n<td>Generalized Lagrange Function<\/td>\n<td>\u5e7f\u4e49\u62c9\u683c\u6717\u65e5\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Generalized Linear 
Model<\/td>\n<td>\u5e7f\u4e49\u7ebf\u6027\u6a21\u578b<\/td>\n<\/tr>\n<tr>\n<td>Generalized Rayleigh Quotient<\/td>\n<td>\u5e7f\u4e49\u745e\u5229\u5546<\/td>\n<\/tr>\n<tr>\n<td>Generative Adversarial Network<\/td>\n<td>\u751f\u6210\u5bf9\u6297\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Generative Model<\/td>\n<td>\u751f\u6210\u5f0f\u6a21\u578b<\/td>\n<\/tr>\n<tr>\n<td>Generator<\/td>\n<td>\u751f\u6210\u5668<\/td>\n<\/tr>\n<tr>\n<td>Generator Network<\/td>\n<td>\u751f\u6210\u5668\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Genetic Algorithm<\/td>\n<td>\u9057\u4f20\u7b97\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Gibbs Distribution<\/td>\n<td>\u5409\u5e03\u65af\u5206\u5e03<\/td>\n<\/tr>\n<tr>\n<td>Gibbs Sampling<\/td>\n<td>\u5409\u5e03\u65af\u91c7\u6837\/\u5409\u5e03\u65af\u62bd\u6837<\/td>\n<\/tr>\n<tr>\n<td>Gini Index<\/td>\n<td>\u57fa\u5c3c\u6307\u6570<\/td>\n<\/tr>\n<tr>\n<td>Global Markov Property<\/td>\n<td>\u5168\u5c40\u9a6c\u5c14\u53ef\u592b\u6027<\/td>\n<\/tr>\n<tr>\n<td>Global Minimum<\/td>\n<td>\u5168\u5c40\u6700\u5c0f<\/td>\n<\/tr>\n<tr>\n<td>Gradient<\/td>\n<td>\u68af\u5ea6<\/td>\n<\/tr>\n<tr>\n<td>Gradient Clipping<\/td>\n<td>\u68af\u5ea6\u622a\u65ad<\/td>\n<\/tr>\n<tr>\n<td>Gradient Descent<\/td>\n<td>\u68af\u5ea6\u4e0b\u964d<\/td>\n<\/tr>\n<tr>\n<td>Gradient Descent Method<\/td>\n<td>\u68af\u5ea6\u4e0b\u964d\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Gradient Exploding Problem<\/td>\n<td>\u68af\u5ea6\u7206\u70b8\u95ee\u9898<\/td>\n<\/tr>\n<tr>\n<td>Gram Matrix<\/td>\n<td>Gram \u77e9\u9635<\/td>\n<\/tr>\n<tr>\n<td>Graph Convolutional Network<\/td>\n<td>\u56fe\u5377\u79ef\u795e\u7ecf\u7f51\u7edc\/\u56fe\u5377\u79ef\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Graph Neural Network<\/td>\n<td>\u56fe\u795e\u7ecf\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Graphical Model<\/td>\n<td>\u56fe\u6a21\u578b<\/td>\n<\/tr>\n<tr>\n<td>Grid Search<\/td>\n<td>\u7f51\u683c\u641c\u7d22<\/td>\n<\/tr>\n<tr>\n<td>Ground Truth<\/td>\n<td>\u771f\u5b9e\u503c<\/td>\n<\/tr>\n<tr>\n<td>Hadamard 
Product<\/td>\n<td>Hadamard\u79ef<\/td>\n<\/tr>\n<tr>\n<td>Hamming Distance<\/td>\n<td>\u6c49\u660e\u8ddd\u79bb<\/td>\n<\/tr>\n<tr>\n<td>Hard Margin<\/td>\n<td>\u786c\u95f4\u9694<\/td>\n<\/tr>\n<tr>\n<td>Hebbian Rule<\/td>\n<td>\u8d6b\u5e03\u6cd5\u5219<\/td>\n<\/tr>\n<tr>\n<td>Hidden Layer<\/td>\n<td>\u9690\u85cf\u5c42<\/td>\n<\/tr>\n<tr>\n<td>Hidden Markov Model<\/td>\n<td>\u9690\u9a6c\u5c14\u53ef\u592b\u6a21\u578b<\/td>\n<\/tr>\n<tr>\n<td>Hidden Variable<\/td>\n<td>\u9690\u53d8\u91cf<\/td>\n<\/tr>\n<tr>\n<td>Hierarchical Clustering<\/td>\n<td>\u5c42\u6b21\u805a\u7c7b<\/td>\n<\/tr>\n<tr>\n<td>Hilbert Space<\/td>\n<td>\u5e0c\u5c14\u4f2f\u7279\u7a7a\u95f4<\/td>\n<\/tr>\n<tr>\n<td>Hinge Loss Function<\/td>\n<td>\u5408\u9875\u635f\u5931\u51fd\u6570\/Hinge\u635f\u5931\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Hold-Out<\/td>\n<td>\u7559\u51fa\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Hyperparameter<\/td>\n<td>\u8d85\u53c2\u6570<\/td>\n<\/tr>\n<tr>\n<td>Hyperparameter Optimization<\/td>\n<td>\u8d85\u53c2\u6570\u4f18\u5316<\/td>\n<\/tr>\n<tr>\n<td>Hypothesis<\/td>\n<td>\u5047\u8bbe<\/td>\n<\/tr>\n<tr>\n<td>Hypothesis Space<\/td>\n<td>\u5047\u8bbe\u7a7a\u95f4<\/td>\n<\/tr>\n<tr>\n<td>Hypothesis Test<\/td>\n<td>\u5047\u8bbe\u68c0\u9a8c<\/td>\n<\/tr>\n<tr>\n<td>Identity Matrix<\/td>\n<td>\u5355\u4f4d\u77e9\u9635<\/td>\n<\/tr>\n<tr>\n<td>Imitation Learning<\/td>\n<td>\u6a21\u4eff\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Importance Sampling<\/td>\n<td>\u91cd\u8981\u6027\u91c7\u6837<\/td>\n<\/tr>\n<tr>\n<td>Improved Iterative Scaling<\/td>\n<td>\u6539\u8fdb\u7684\u8fed\u4ee3\u5c3a\u5ea6\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Incremental Learning<\/td>\n<td>\u589e\u91cf\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Independent and Identically Distributed<\/td>\n<td>\u72ec\u7acb\u540c\u5206\u5e03<\/td>\n<\/tr>\n<tr>\n<td>Indicator Function<\/td>\n<td>\u6307\u793a\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Individual 
Learner<\/td>\n<td>\u4e2a\u4f53\u5b66\u4e60\u5668<\/td>\n<\/tr>\n<tr>\n<td>Induction<\/td>\n<td>\u5f52\u7eb3<\/td>\n<\/tr>\n<tr>\n<td>Inductive Bias<\/td>\n<td>\u5f52\u7eb3\u504f\u597d<\/td>\n<\/tr>\n<tr>\n<td>Inductive Learning<\/td>\n<td>\u5f52\u7eb3\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Inductive Logic Programming<\/td>\n<td>\u5f52\u7eb3\u903b\u8f91\u7a0b\u5e8f\u8bbe\u8ba1<\/td>\n<\/tr>\n<tr>\n<td>Inference<\/td>\n<td>\u63a8\u65ad<\/td>\n<\/tr>\n<tr>\n<td>Information Entropy<\/td>\n<td>\u4fe1\u606f\u71b5<\/td>\n<\/tr>\n<tr>\n<td>Information Gain<\/td>\n<td>\u4fe1\u606f\u589e\u76ca<\/td>\n<\/tr>\n<tr>\n<td>Inner Product<\/td>\n<td>\u5185\u79ef<\/td>\n<\/tr>\n<tr>\n<td>Instance<\/td>\n<td>\u793a\u4f8b<\/td>\n<\/tr>\n<tr>\n<td>Internal Covariate Shift<\/td>\n<td>\u5185\u90e8\u534f\u53d8\u91cf\u504f\u79fb<\/td>\n<\/tr>\n<tr>\n<td>Inverse Matrix<\/td>\n<td>\u9006\u77e9\u9635<\/td>\n<\/tr>\n<tr>\n<td>Inverse Resolution<\/td>\n<td>\u9006\u5f52\u7ed3<\/td>\n<\/tr>\n<tr>\n<td>Isometric Mapping<\/td>\n<td>\u7b49\u5ea6\u91cf\u6620\u5c04<\/td>\n<\/tr>\n<tr>\n<td>Jacobian Matrix<\/td>\n<td>\u96c5\u53ef\u6bd4\u77e9\u9635<\/td>\n<\/tr>\n<tr>\n<td>Jensen Inequality<\/td>\n<td>Jensen\u4e0d\u7b49\u5f0f<\/td>\n<\/tr>\n<tr>\n<td>Joint Probability Distribution<\/td>\n<td>\u8054\u5408\u6982\u7387\u5206\u5e03<\/td>\n<\/tr>\n<tr>\n<td>K-Armed Bandit Problem<\/td>\n<td>k-\u6447\u81c2\u8001\u864e\u673a<\/td>\n<\/tr>\n<tr>\n<td>K-Fold Cross Validation<\/td>\n<td>k \u6298\u4ea4\u53c9\u9a8c\u8bc1<\/td>\n<\/tr>\n<tr>\n<td>Karush-Kuhn-Tucker Condition<\/td>\n<td>KKT\u6761\u4ef6<\/td>\n<\/tr>\n<tr>\n<td>Karush\u2013Kuhn\u2013Tucker<\/td>\n<td>Karush\u2013Kuhn\u2013Tucker<\/td>\n<\/tr>\n<tr>\n<td>Kernel Function<\/td>\n<td>\u6838\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Kernel Method<\/td>\n<td>\u6838\u65b9\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Kernel Trick<\/td>\n<td>\u6838\u6280\u5de7<\/td>\n<\/tr>\n<tr>\n<td>Kernelized Linear Discriminant 
Analysis<\/td>\n<td>\u6838\u7ebf\u6027\u5224\u522b\u5206\u6790<\/td>\n<\/tr>\n<tr>\n<td>KL Divergence<\/td>\n<td>KL\u6563\u5ea6<\/td>\n<\/tr>\n<tr>\n<td>L-BFGS<\/td>\n<td>L-BFGS<\/td>\n<\/tr>\n<tr>\n<td>Label<\/td>\n<td>\u6807\u7b7e<\/td>\n<\/tr>\n<tr>\n<td>Label Space<\/td>\n<td>\u6807\u8bb0\u7a7a\u95f4<\/td>\n<\/tr>\n<tr>\n<td>Lagrange Duality<\/td>\n<td>\u62c9\u683c\u6717\u65e5\u5bf9\u5076\u6027<\/td>\n<\/tr>\n<tr>\n<td>Lagrange Multiplier<\/td>\n<td>\u62c9\u683c\u6717\u65e5\u4e58\u5b50<\/td>\n<\/tr>\n<tr>\n<td>Language Model<\/td>\n<td>\u8bed\u8a00\u6a21\u578b<\/td>\n<\/tr>\n<tr>\n<td>Laplace Smoothing<\/td>\n<td>\u62c9\u666e\u62c9\u65af\u5e73\u6ed1<\/td>\n<\/tr>\n<tr>\n<td>Laplacian Correction<\/td>\n<td>\u62c9\u666e\u62c9\u65af\u4fee\u6b63<\/td>\n<\/tr>\n<tr>\n<td>Latent Dirichlet Allocation<\/td>\n<td>\u6f5c\u5728\u72c4\u5229\u514b\u96f7\u5206\u914d<\/td>\n<\/tr>\n<tr>\n<td>Latent Semantic Analysis<\/td>\n<td>\u6f5c\u5728\u8bed\u4e49\u5206\u6790<\/td>\n<\/tr>\n<tr>\n<td>Latent Variable<\/td>\n<td>\u6f5c\u53d8\u91cf\/\u9690\u53d8\u91cf<\/td>\n<\/tr>\n<tr>\n<td>Law of Large Numbers<\/td>\n<td>\u5927\u6570\u5b9a\u5f8b<\/td>\n<\/tr>\n<tr>\n<td>Layer Normalization<\/td>\n<td>\u5c42\u89c4\u8303\u5316<\/td>\n<\/tr>\n<tr>\n<td>Lazy Learning<\/td>\n<td>\u61d2\u60f0\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Leaky Relu<\/td>\n<td>\u6cc4\u6f0f\u4fee\u6b63\u7ebf\u6027\u5355\u5143\/\u6cc4\u6f0f\u6574\u6d41\u7ebf\u6027\u5355\u5143<\/td>\n<\/tr>\n<tr>\n<td>Learner<\/td>\n<td>\u5b66\u4e60\u5668<\/td>\n<\/tr>\n<tr>\n<td>Learning<\/td>\n<td>\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Learning By Analogy<\/td>\n<td>\u7c7b\u6bd4\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Learning Rate<\/td>\n<td>\u5b66\u4e60\u7387<\/td>\n<\/tr>\n<tr>\n<td>Learning Vector Quantization<\/td>\n<td>\u5b66\u4e60\u5411\u91cf\u91cf\u5316<\/td>\n<\/tr>\n<tr>\n<td>Least Square Method<\/td>\n<td>\u6700\u5c0f\u4e8c\u4e58\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Least Squares Regression 
Tree<\/td>\n<td>\u6700\u5c0f\u4e8c\u4e58\u56de\u5f52\u6811<\/td>\n<\/tr>\n<tr>\n<td>Left Singular Vector<\/td>\n<td>\u5de6\u5947\u5f02\u5411\u91cf<\/td>\n<\/tr>\n<tr>\n<td>Likelihood<\/td>\n<td>\u4f3c\u7136<\/td>\n<\/tr>\n<tr>\n<td>Linear Chain Conditional Random Field<\/td>\n<td>\u7ebf\u6027\u94fe\u6761\u4ef6\u968f\u673a\u573a<\/td>\n<\/tr>\n<tr>\n<td>Linear Classification Model<\/td>\n<td>\u7ebf\u6027\u5206\u7c7b\u6a21\u578b<\/td>\n<\/tr>\n<tr>\n<td>Linear Classifier<\/td>\n<td>\u7ebf\u6027\u5206\u7c7b\u5668<\/td>\n<\/tr>\n<tr>\n<td>Linear Dependence<\/td>\n<td>\u7ebf\u6027\u76f8\u5173<\/td>\n<\/tr>\n<tr>\n<td>Linear Discriminant Analysis<\/td>\n<td>\u7ebf\u6027\u5224\u522b\u5206\u6790<\/td>\n<\/tr>\n<tr>\n<td>Linear Model<\/td>\n<td>\u7ebf\u6027\u6a21\u578b<\/td>\n<\/tr>\n<tr>\n<td>Linear Regression<\/td>\n<td>\u7ebf\u6027\u56de\u5f52<\/td>\n<\/tr>\n<tr>\n<td>Link Function<\/td>\n<td>\u8054\u7cfb\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Local Markov Property<\/td>\n<td>\u5c40\u90e8\u9a6c\u5c14\u53ef\u592b\u6027<\/td>\n<\/tr>\n<tr>\n<td>Local Minima<\/td>\n<td>\u5c40\u90e8\u6781\u5c0f<\/td>\n<\/tr>\n<tr>\n<td>Local Minimum<\/td>\n<td>\u5c40\u90e8\u6781\u5c0f<\/td>\n<\/tr>\n<tr>\n<td>Local Representation<\/td>\n<td>\u5c40\u90e8\u5f0f\u8868\u793a\/\u5c40\u90e8\u5f0f\u8868\u5f81<\/td>\n<\/tr>\n<tr>\n<td>Log Likelihood<\/td>\n<td>\u5bf9\u6570\u4f3c\u7136\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Log Linear Model<\/td>\n<td>\u5bf9\u6570\u7ebf\u6027\u6a21\u578b<\/td>\n<\/tr>\n<tr>\n<td>Log-Likelihood<\/td>\n<td>\u5bf9\u6570\u4f3c\u7136<\/td>\n<\/tr>\n<tr>\n<td>Log-Linear Regression<\/td>\n<td>\u5bf9\u6570\u7ebf\u6027\u56de\u5f52<\/td>\n<\/tr>\n<tr>\n<td>Logistic Function<\/td>\n<td>\u5bf9\u6570\u51e0\u7387\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Logistic Regression<\/td>\n<td>\u5bf9\u6570\u51e0\u7387\u56de\u5f52<\/td>\n<\/tr>\n<tr>\n<td>Logit<\/td>\n<td>\u5bf9\u6570\u51e0\u7387<\/td>\n<\/tr>\n<tr>\n<td>Long Short Term 
Memory<\/td>\n<td>\u957f\u77ed\u671f\u8bb0\u5fc6<\/td>\n<\/tr>\n<tr>\n<td>Long Short-Term Memory Network<\/td>\n<td>\u957f\u77ed\u671f\u8bb0\u5fc6\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Loopy Belief Propagation<\/td>\n<td>\u73af\u72b6\u4fe1\u5ff5\u4f20\u64ad<\/td>\n<\/tr>\n<tr>\n<td>Loss Function<\/td>\n<td>\u635f\u5931\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Low Rank Matrix Approximation<\/td>\n<td>\u4f4e\u79e9\u77e9\u9635\u8fd1\u4f3c<\/td>\n<\/tr>\n<tr>\n<td>Machine Learning<\/td>\n<td>\u673a\u5668\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Macron-R<\/td>\n<td>\u5b8f\u67e5\u5168\u7387<\/td>\n<\/tr>\n<tr>\n<td>Manhattan Distance<\/td>\n<td>\u66fc\u54c8\u987f\u8ddd\u79bb<\/td>\n<\/tr>\n<tr>\n<td>Manifold<\/td>\n<td>\u6d41\u5f62<\/td>\n<\/tr>\n<tr>\n<td>Manifold Assumption<\/td>\n<td>\u6d41\u5f62\u5047\u8bbe<\/td>\n<\/tr>\n<tr>\n<td>Manifold Learning<\/td>\n<td>\u6d41\u5f62\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Margin<\/td>\n<td>\u95f4\u9694<\/td>\n<\/tr>\n<tr>\n<td>Marginal Distribution<\/td>\n<td>\u8fb9\u7f18\u5206\u5e03<\/td>\n<\/tr>\n<tr>\n<td>Marginal Independence<\/td>\n<td>\u8fb9\u7f18\u72ec\u7acb\u6027<\/td>\n<\/tr>\n<tr>\n<td>Marginalization<\/td>\n<td>\u8fb9\u7f18\u5316<\/td>\n<\/tr>\n<tr>\n<td>Markov Chain<\/td>\n<td>\u9a6c\u5c14\u53ef\u592b\u94fe<\/td>\n<\/tr>\n<tr>\n<td>Markov Chain Monte Carlo<\/td>\n<td>\u9a6c\u5c14\u53ef\u592b\u94fe\u8499\u7279\u5361\u7f57<\/td>\n<\/tr>\n<tr>\n<td>Markov Decision Process<\/td>\n<td>\u9a6c\u5c14\u53ef\u592b\u51b3\u7b56\u8fc7\u7a0b<\/td>\n<\/tr>\n<tr>\n<td>Markov Network<\/td>\n<td>\u9a6c\u5c14\u53ef\u592b\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Markov Process<\/td>\n<td>\u9a6c\u5c14\u53ef\u592b\u8fc7\u7a0b<\/td>\n<\/tr>\n<tr>\n<td>Markov Random Field<\/td>\n<td>\u9a6c\u5c14\u53ef\u592b\u968f\u673a\u573a<\/td>\n<\/tr>\n<tr>\n<td>Mask<\/td>\n<td>\u63a9\u7801<\/td>\n<\/tr>\n<tr>\n<td>Matrix<\/td>\n<td>\u77e9\u9635<\/td>\n<\/tr>\n<tr>\n<td>Matrix Inversion<\/td>\n<td>\u9006\u77e9\u9635<\/td>\n<\/tr>\n<tr>\n<td>Max 
Pooling<\/td>\n<td>\u6700\u5927\u6c47\u805a<\/td>\n<\/tr>\n<tr>\n<td>Maximal Clique<\/td>\n<td>\u6700\u5927\u56e2<\/td>\n<\/tr>\n<tr>\n<td>Maximum Entropy Model<\/td>\n<td>\u6700\u5927\u71b5\u6a21\u578b<\/td>\n<\/tr>\n<tr>\n<td>Maximum Likelihood Estimation<\/td>\n<td>\u6781\u5927\u4f3c\u7136\u4f30\u8ba1<\/td>\n<\/tr>\n<tr>\n<td>Maximum Margin<\/td>\n<td>\u6700\u5927\u95f4\u9694<\/td>\n<\/tr>\n<tr>\n<td>Mean Filed<\/td>\n<td>\u5e73\u5747\u573a<\/td>\n<\/tr>\n<tr>\n<td>Mean Pooling<\/td>\n<td>\u5e73\u5747\u6c47\u805a<\/td>\n<\/tr>\n<tr>\n<td>Mean Squared Error<\/td>\n<td>\u5747\u65b9\u8bef\u5dee<\/td>\n<\/tr>\n<tr>\n<td>Mean-Field<\/td>\n<td>\u5e73\u5747\u573a<\/td>\n<\/tr>\n<tr>\n<td>Memory Network<\/td>\n<td>\u8bb0\u5fc6\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Message Passing<\/td>\n<td>\u6d88\u606f\u4f20\u9012<\/td>\n<\/tr>\n<tr>\n<td>Metric Learning<\/td>\n<td>\u5ea6\u91cf\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Micro-R<\/td>\n<td>\u5fae\u67e5\u5168\u7387<\/td>\n<\/tr>\n<tr>\n<td>Minibatch<\/td>\n<td>\u5c0f\u6279\u91cf<\/td>\n<\/tr>\n<tr>\n<td>Minimal Description Length<\/td>\n<td>\u6700\u5c0f\u63cf\u8ff0\u957f\u5ea6<\/td>\n<\/tr>\n<tr>\n<td>Minimax Game<\/td>\n<td>\u6781\u5c0f\u6781\u5927\u535a\u5f08<\/td>\n<\/tr>\n<tr>\n<td>Minkowski Distance<\/td>\n<td>\u95f5\u53ef\u592b\u65af\u57fa\u8ddd\u79bb<\/td>\n<\/tr>\n<tr>\n<td>Mixture of Experts<\/td>\n<td>\u6df7\u5408\u4e13\u5bb6\u6a21\u578b<\/td>\n<\/tr>\n<tr>\n<td>Mixture-of-Gaussian<\/td>\n<td>\u9ad8\u65af\u6df7\u5408<\/td>\n<\/tr>\n<tr>\n<td>Model<\/td>\n<td>\u6a21\u578b<\/td>\n<\/tr>\n<tr>\n<td>Model Selection<\/td>\n<td>\u6a21\u578b\u9009\u62e9<\/td>\n<\/tr>\n<tr>\n<td>Momentum Method<\/td>\n<td>\u52a8\u91cf\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Monte Carlo Method<\/td>\n<td>\u8499\u7279\u5361\u7f57\u65b9\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Moral Graph<\/td>\n<td>\u7aef\u6b63\u56fe\/\u9053\u5fb7\u56fe<\/td>\n<\/tr>\n<tr>\n<td>Moralization<\/td>\n<td>\u9053\u5fb7\u5316<\/td>\n<\/tr>\n<tr>\n<td>Multi-Class 
Classification<\/td>\n<td>\u591a\u5206\u7c7b<\/td>\n<\/tr>\n<tr>\n<td>Multi-Head Attention<\/td>\n<td>\u591a\u5934\u6ce8\u610f\u529b<\/td>\n<\/tr>\n<tr>\n<td>Multi-Head Self-Attention<\/td>\n<td>\u591a\u5934\u81ea\u6ce8\u610f\u529b<\/td>\n<\/tr>\n<tr>\n<td>Multi-Kernel Learning<\/td>\n<td>\u591a\u6838\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Multi-Label Learning<\/td>\n<td>\u591a\u6807\u8bb0\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Multi-Layer Feedforward Neural Networks<\/td>\n<td>\u591a\u5c42\u524d\u9988\u795e\u7ecf\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Multi-Layer Perceptron<\/td>\n<td>\u591a\u5c42\u611f\u77e5\u673a<\/td>\n<\/tr>\n<tr>\n<td>Multinomial Distribution<\/td>\n<td>\u591a\u9879\u5206\u5e03<\/td>\n<\/tr>\n<tr>\n<td>Multiple Dimensional Scaling<\/td>\n<td>\u591a\u7ef4\u7f29\u653e<\/td>\n<\/tr>\n<tr>\n<td>Multiple Linear Regression<\/td>\n<td>\u591a\u5143\u7ebf\u6027\u56de\u5f52<\/td>\n<\/tr>\n<tr>\n<td>Multitask Learning<\/td>\n<td>\u591a\u4efb\u52a1\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Multivariate Normal Distribution<\/td>\n<td>\u591a\u5143\u6b63\u6001\u5206\u5e03<\/td>\n<\/tr>\n<tr>\n<td>Mutual Information<\/td>\n<td>\u4e92\u4fe1\u606f<\/td>\n<\/tr>\n<tr>\n<td>N-Gram Model<\/td>\n<td>N\u5143\u6a21\u578b<\/td>\n<\/tr>\n<tr>\n<td>Naive Bayes Classifier<\/td>\n<td>\u6734\u7d20\u8d1d\u53f6\u65af\u5206\u7c7b\u5668<\/td>\n<\/tr>\n<tr>\n<td>Naive Bayes<\/td>\n<td>\u6734\u7d20\u8d1d\u53f6\u65af<\/td>\n<\/tr>\n<tr>\n<td>Nearest Neighbor Classifier<\/td>\n<td>\u6700\u8fd1\u90bb\u5206\u7c7b\u5668<\/td>\n<\/tr>\n<tr>\n<td>Negative Log Likelihood<\/td>\n<td>\u8d1f\u5bf9\u6570\u4f3c\u7136\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Neighbourhood Component Analysis<\/td>\n<td>\u8fd1\u90bb\u6210\u5206\u5206\u6790<\/td>\n<\/tr>\n<tr>\n<td>Net Input<\/td>\n<td>\u51c0\u8f93\u5165<\/td>\n<\/tr>\n<tr>\n<td>Neural Network<\/td>\n<td>\u795e\u7ecf\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Neural Turing 
Machine<\/td>\n<td>\u795e\u7ecf\u56fe\u7075\u673a<\/td>\n<\/tr>\n<tr>\n<td>Neuron<\/td>\n<td>\u795e\u7ecf\u5143<\/td>\n<\/tr>\n<tr>\n<td>Newton Method<\/td>\n<td>\u725b\u987f\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>No Free Lunch Theorem<\/td>\n<td>\u6ca1\u6709\u514d\u8d39\u5348\u9910\u5b9a\u7406<\/td>\n<\/tr>\n<tr>\n<td>Noise-Contrastive Estimation<\/td>\n<td>\u566a\u58f0\u5bf9\u6bd4\u4f30\u8ba1<\/td>\n<\/tr>\n<tr>\n<td>Nominal Attribute<\/td>\n<td>\u5217\u540d\u5c5e\u6027<\/td>\n<\/tr>\n<tr>\n<td>Non-Convex Optimization<\/td>\n<td>\u975e\u51f8\u4f18\u5316<\/td>\n<\/tr>\n<tr>\n<td>Non-Metric Distance<\/td>\n<td>\u975e\u5ea6\u91cf\u8ddd\u79bb<\/td>\n<\/tr>\n<tr>\n<td>Non-Negative Matrix Factorization<\/td>\n<td>\u975e\u8d1f\u77e9\u9635\u5206\u89e3<\/td>\n<\/tr>\n<tr>\n<td>Non-Ordinal Attribute<\/td>\n<td>\u65e0\u5e8f\u5c5e\u6027<\/td>\n<\/tr>\n<tr>\n<td>Norm<\/td>\n<td>\u8303\u6570<\/td>\n<\/tr>\n<tr>\n<td>Normal Distribution<\/td>\n<td>\u6b63\u6001\u5206\u5e03<\/td>\n<\/tr>\n<tr>\n<td>Normalization<\/td>\n<td>\u89c4\u8303\u5316<\/td>\n<\/tr>\n<tr>\n<td>Nuclear Norm<\/td>\n<td>\u6838\u8303\u6570<\/td>\n<\/tr>\n<tr>\n<td>Number of Epochs<\/td>\n<td>\u8f6e\u6570<\/td>\n<\/tr>\n<tr>\n<td>Numerical Attribute<\/td>\n<td>\u6570\u503c\u5c5e\u6027<\/td>\n<\/tr>\n<tr>\n<td>Object Detection<\/td>\n<td>\u76ee\u6807\u68c0\u6d4b<\/td>\n<\/tr>\n<tr>\n<td>Oblique Decision Tree<\/td>\n<td>\u659c\u51b3\u7b56\u6811<\/td>\n<\/tr>\n<tr>\n<td>Occam&#8217;s Razor<\/td>\n<td>\u5965\u5361\u59c6\u5243\u5200<\/td>\n<\/tr>\n<tr>\n<td>Odds<\/td>\n<td>\u51e0\u7387<\/td>\n<\/tr>\n<tr>\n<td>Off-Policy<\/td>\n<td>\u5f02\u7b56\u7565<\/td>\n<\/tr>\n<tr>\n<td>On-Policy<\/td>\n<td>\u540c\u7b56\u7565<\/td>\n<\/tr>\n<tr>\n<td>One-Dependent Estimator<\/td>\n<td>\u72ec\u4f9d\u8d56\u4f30\u8ba1<\/td>\n<\/tr>\n<tr>\n<td>One-Hot<\/td>\n<td>\u72ec\u70ed<\/td>\n<\/tr>\n<tr>\n<td>Online 
Learning<\/td>\n<td>\u5728\u7ebf\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Optimizer<\/td>\n<td>\u4f18\u5316\u5668<\/td>\n<\/tr>\n<tr>\n<td>Ordinal Attribute<\/td>\n<td>\u6709\u5e8f\u5c5e\u6027<\/td>\n<\/tr>\n<tr>\n<td>Orthogonal<\/td>\n<td>\u6b63\u4ea4<\/td>\n<\/tr>\n<tr>\n<td>Orthogonal Matrix<\/td>\n<td>\u6b63\u4ea4\u77e9\u9635<\/td>\n<\/tr>\n<tr>\n<td>Out-Of-Bag Estimate<\/td>\n<td>\u5305\u5916\u4f30\u8ba1<\/td>\n<\/tr>\n<tr>\n<td>Outlier<\/td>\n<td>\u5f02\u5e38\u70b9<\/td>\n<\/tr>\n<tr>\n<td>Over-Parameterized<\/td>\n<td>\u8fc7\u5ea6\u53c2\u6570\u5316<\/td>\n<\/tr>\n<tr>\n<td>Overfitting<\/td>\n<td>\u8fc7\u62df\u5408<\/td>\n<\/tr>\n<tr>\n<td>Oversampling<\/td>\n<td>\u8fc7\u91c7\u6837<\/td>\n<\/tr>\n<tr>\n<td>Pac-Learnable<\/td>\n<td>PAC\u53ef\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Padding<\/td>\n<td>\u586b\u5145<\/td>\n<\/tr>\n<tr>\n<td>Pairwise Markov Property<\/td>\n<td>\u6210\u5bf9\u9a6c\u5c14\u53ef\u592b\u6027<\/td>\n<\/tr>\n<tr>\n<td>Parallel Distributed Processing<\/td>\n<td>\u5206\u5e03\u5f0f\u5e76\u884c\u5904\u7406<\/td>\n<\/tr>\n<tr>\n<td>Parameter<\/td>\n<td>\u53c2\u6570<\/td>\n<\/tr>\n<tr>\n<td>Parameter Estimation<\/td>\n<td>\u53c2\u6570\u4f30\u8ba1<\/td>\n<\/tr>\n<tr>\n<td>Parameter Space<\/td>\n<td>\u53c2\u6570\u7a7a\u95f4<\/td>\n<\/tr>\n<tr>\n<td>Parameter Tuning<\/td>\n<td>\u8c03\u53c2<\/td>\n<\/tr>\n<tr>\n<td>Parametric ReLU<\/td>\n<td>\u53c2\u6570\u5316\u4fee\u6b63\u7ebf\u6027\u5355\u5143\/\u53c2\u6570\u5316\u6574\u6d41\u7ebf\u6027\u5355\u5143<\/td>\n<\/tr>\n<tr>\n<td>Part-Of-Speech Tagging<\/td>\n<td>\u8bcd\u6027\u6807\u6ce8<\/td>\n<\/tr>\n<tr>\n<td>Partial Derivative<\/td>\n<td>\u504f\u5bfc\u6570<\/td>\n<\/tr>\n<tr>\n<td>Partially Observable Markov Decision Processes<\/td>\n<td>\u90e8\u5206\u53ef\u89c2\u6d4b\u9a6c\u5c14\u53ef\u592b\u51b3\u7b56\u8fc7\u7a0b<\/td>\n<\/tr>\n<tr>\n<td>Partition Function<\/td>\n<td>\u914d\u5206\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Perceptron<\/td>\n<td>\u611f\u77e5\u673a<\/td>\n<\/tr>\n<tr>\n<td>Performance 
Measure<\/td>\n<td>\u6027\u80fd\u5ea6\u91cf<\/td>\n<\/tr>\n<tr>\n<td>Perplexity<\/td>\n<td>\u56f0\u60d1\u5ea6<\/td>\n<\/tr>\n<tr>\n<td>Pointer Network<\/td>\n<td>\u6307\u9488\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Policy<\/td>\n<td>\u7b56\u7565<\/td>\n<\/tr>\n<tr>\n<td>Policy Gradient<\/td>\n<td>\u7b56\u7565\u68af\u5ea6<\/td>\n<\/tr>\n<tr>\n<td>Policy Iteration<\/td>\n<td>\u7b56\u7565\u8fed\u4ee3<\/td>\n<\/tr>\n<tr>\n<td>Polynomial Kernel Function<\/td>\n<td>\u591a\u9879\u5f0f\u6838\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Pooling<\/td>\n<td>\u6c47\u805a<\/td>\n<\/tr>\n<tr>\n<td>Pooling Layer<\/td>\n<td>\u6c47\u805a\u5c42<\/td>\n<\/tr>\n<tr>\n<td>Positive Definite Matrix<\/td>\n<td>\u6b63\u5b9a\u77e9\u9635<\/td>\n<\/tr>\n<tr>\n<td>Post-Pruning<\/td>\n<td>\u540e\u526a\u679d<\/td>\n<\/tr>\n<tr>\n<td>Potential Function<\/td>\n<td>\u52bf\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Power Method<\/td>\n<td>\u5e42\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Pre-Training<\/td>\n<td>\u9884\u8bad\u7ec3<\/td>\n<\/tr>\n<tr>\n<td>Precision<\/td>\n<td>\u67e5\u51c6\u7387\/\u51c6\u786e\u7387<\/td>\n<\/tr>\n<tr>\n<td>Prepruning<\/td>\n<td>\u9884\u526a\u679d<\/td>\n<\/tr>\n<tr>\n<td>Primal Problem<\/td>\n<td>\u4e3b\u95ee\u9898<\/td>\n<\/tr>\n<tr>\n<td>Primary Visual Cortex<\/td>\n<td>\u521d\u7ea7\u89c6\u89c9\u76ae\u5c42<\/td>\n<\/tr>\n<tr>\n<td>Principal Component Analysis<\/td>\n<td>\u4e3b\u6210\u5206\u5206\u6790<\/td>\n<\/tr>\n<tr>\n<td>Prior<\/td>\n<td>\u5148\u9a8c<\/td>\n<\/tr>\n<tr>\n<td>Probabilistic Context-Free Grammar<\/td>\n<td>\u6982\u7387\u4e0a\u4e0b\u6587\u65e0\u5173\u6587\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Probabilistic Graphical Model<\/td>\n<td>\u6982\u7387\u56fe\u6a21\u578b<\/td>\n<\/tr>\n<tr>\n<td>Probabilistic Model<\/td>\n<td>\u6982\u7387\u6a21\u578b<\/td>\n<\/tr>\n<tr>\n<td>Probability Density Function<\/td>\n<td>\u6982\u7387\u5bc6\u5ea6\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Probability Distribution<\/td>\n<td>\u6982\u7387\u5206\u5e03<\/td>\n<\/tr>\n<tr>\n<td>Probably Approximately 
Correct<\/td>\n<td>\u6982\u7387\u8fd1\u4f3c\u6b63\u786e<\/td>\n<\/tr>\n<tr>\n<td>Proposal Distribution<\/td>\n<td>\u63d0\u8bae\u5206\u5e03<\/td>\n<\/tr>\n<tr>\n<td>Prototype-Based Clustering<\/td>\n<td>\u539f\u578b\u805a\u7c7b<\/td>\n<\/tr>\n<tr>\n<td>Proximal Gradient Descent<\/td>\n<td>\u8fd1\u7aef\u68af\u5ea6\u4e0b\u964d<\/td>\n<\/tr>\n<tr>\n<td>Pruning<\/td>\n<td>\u526a\u679d<\/td>\n<\/tr>\n<tr>\n<td>Quadratic Loss Function<\/td>\n<td>\u5e73\u65b9\u635f\u5931\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Quadratic Programming<\/td>\n<td>\u4e8c\u6b21\u89c4\u5212<\/td>\n<\/tr>\n<tr>\n<td>Quasi Newton Method<\/td>\n<td>\u62df\u725b\u987f\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Radial Basis Function<\/td>\n<td>\u5f84\u5411\u57fa\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Random Forest<\/td>\n<td>\u968f\u673a\u68ee\u6797<\/td>\n<\/tr>\n<tr>\n<td>Random Sampling<\/td>\n<td>\u968f\u673a\u91c7\u6837<\/td>\n<\/tr>\n<tr>\n<td>Random Search<\/td>\n<td>\u968f\u673a\u641c\u7d22<\/td>\n<\/tr>\n<tr>\n<td>Random Variable<\/td>\n<td>\u968f\u673a\u53d8\u91cf<\/td>\n<\/tr>\n<tr>\n<td>Random Walk<\/td>\n<td>\u968f\u673a\u6e38\u8d70<\/td>\n<\/tr>\n<tr>\n<td>Recall<\/td>\n<td>\u67e5\u5168\u7387\/\u53ec\u56de\u7387<\/td>\n<\/tr>\n<tr>\n<td>Receptive Field<\/td>\n<td>\u611f\u53d7\u91ce<\/td>\n<\/tr>\n<tr>\n<td>Reconstruction Error<\/td>\n<td>\u91cd\u6784\u8bef\u5dee<\/td>\n<\/tr>\n<tr>\n<td>Rectified Linear Unit<\/td>\n<td>\u4fee\u6b63\u7ebf\u6027\u5355\u5143\/\u6574\u6d41\u7ebf\u6027\u5355\u5143<\/td>\n<\/tr>\n<tr>\n<td>Recurrent Neural Network<\/td>\n<td>\u5faa\u73af\u795e\u7ecf\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Recursive Neural Network<\/td>\n<td>\u9012\u5f52\u795e\u7ecf\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Regression<\/td>\n<td>\u56de\u5f52<\/td>\n<\/tr>\n<tr>\n<td>Regularization<\/td>\n<td>\u6b63\u5219\u5316<\/td>\n<\/tr>\n<tr>\n<td>Regularizer<\/td>\n<td>\u6b63\u5219\u5316\u9879<\/td>\n<\/tr>\n<tr>\n<td>Reinforcement Learning<\/td>\n<td>\u5f3a\u5316\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Relative 
Entropy<\/td>\n<td>\u76f8\u5bf9\u71b5<\/td>\n<\/tr>\n<tr>\n<td>Reparameterization<\/td>\n<td>\u518d\u53c2\u6570\u5316\/\u91cd\u53c2\u6570\u5316<\/td>\n<\/tr>\n<tr>\n<td>Representation<\/td>\n<td>\u8868\u793a<\/td>\n<\/tr>\n<tr>\n<td>Representation Learning<\/td>\n<td>\u8868\u793a\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Representer Theorem<\/td>\n<td>\u8868\u793a\u5b9a\u7406<\/td>\n<\/tr>\n<tr>\n<td>Reproducing Kernel Hilbert Space<\/td>\n<td>\u518d\u751f\u6838\u5e0c\u5c14\u4f2f\u7279\u7a7a\u95f4<\/td>\n<\/tr>\n<tr>\n<td>Rescaling<\/td>\n<td>\u518d\u7f29\u653e<\/td>\n<\/tr>\n<tr>\n<td>Reset Gate<\/td>\n<td>\u91cd\u7f6e\u95e8<\/td>\n<\/tr>\n<tr>\n<td>Residual Connection<\/td>\n<td>\u6b8b\u5dee\u8fde\u63a5<\/td>\n<\/tr>\n<tr>\n<td>Residual Network<\/td>\n<td>\u6b8b\u5dee\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Restricted Boltzmann Machine<\/td>\n<td>\u53d7\u9650\u73bb\u5c14\u5179\u66fc\u673a<\/td>\n<\/tr>\n<tr>\n<td>Reward<\/td>\n<td>\u5956\u52b1<\/td>\n<\/tr>\n<tr>\n<td>Ridge Regression<\/td>\n<td>\u5cad\u56de\u5f52<\/td>\n<\/tr>\n<tr>\n<td>Right Singular Vector<\/td>\n<td>\u53f3\u5947\u5f02\u5411\u91cf<\/td>\n<\/tr>\n<tr>\n<td>Risk<\/td>\n<td>\u98ce\u9669<\/td>\n<\/tr>\n<tr>\n<td>Robustness<\/td>\n<td>\u7a33\u5065\u6027<\/td>\n<\/tr>\n<tr>\n<td>Root Node<\/td>\n<td>\u6839\u7ed3\u70b9<\/td>\n<\/tr>\n<tr>\n<td>Rule Learning<\/td>\n<td>\u89c4\u5219\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Saddle Point<\/td>\n<td>\u978d\u70b9<\/td>\n<\/tr>\n<tr>\n<td>Sample<\/td>\n<td>\u6837\u672c<\/td>\n<\/tr>\n<tr>\n<td>Sample Complexity<\/td>\n<td>\u6837\u672c\u590d\u6742\u5ea6<\/td>\n<\/tr>\n<tr>\n<td>Sample Space<\/td>\n<td>\u6837\u672c\u7a7a\u95f4<\/td>\n<\/tr>\n<tr>\n<td>Scalar<\/td>\n<td>\u6807\u91cf<\/td>\n<\/tr>\n<tr>\n<td>Selective Ensemble<\/td>\n<td>\u9009\u62e9\u6027\u96c6\u6210<\/td>\n<\/tr>\n<tr>\n<td>Self Information<\/td>\n<td>\u81ea\u4fe1\u606f<\/td>\n<\/tr>\n<tr>\n<td>Self-Attention<\/td>\n<td>\u81ea\u6ce8\u610f\u529b<\/td>\n<\/tr>\n<tr>\n<td>Self-Organizing 
Map<\/td>\n<td>\u81ea\u7ec4\u7ec7\u6620\u5c04\u7f51<\/td>\n<\/tr>\n<tr>\n<td>Self-Training<\/td>\n<td>\u81ea\u8bad\u7ec3<\/td>\n<\/tr>\n<tr>\n<td>Semi-Definite Programming<\/td>\n<td>\u534a\u6b63\u5b9a\u89c4\u5212<\/td>\n<\/tr>\n<tr>\n<td>Semi-Naive Bayes Classifiers<\/td>\n<td>\u534a\u6734\u7d20\u8d1d\u53f6\u65af\u5206\u7c7b\u5668<\/td>\n<\/tr>\n<tr>\n<td>Semi-Restricted Boltzmann Machine<\/td>\n<td>\u534a\u53d7\u9650\u73bb\u5c14\u5179\u66fc\u673a<\/td>\n<\/tr>\n<tr>\n<td>Semi-Supervised Clustering<\/td>\n<td>\u534a\u76d1\u7763\u805a\u7c7b<\/td>\n<\/tr>\n<tr>\n<td>Semi-Supervised Learning<\/td>\n<td>\u534a\u76d1\u7763\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Semi-Supervised Support Vector Machine<\/td>\n<td>\u534a\u76d1\u7763\u652f\u6301\u5411\u91cf\u673a<\/td>\n<\/tr>\n<tr>\n<td>Sentiment Analysis<\/td>\n<td>\u60c5\u611f\u5206\u6790<\/td>\n<\/tr>\n<tr>\n<td>Separating Hyperplane<\/td>\n<td>\u5206\u79bb\u8d85\u5e73\u9762<\/td>\n<\/tr>\n<tr>\n<td>Sequential Covering<\/td>\n<td>\u5e8f\u8d2f\u8986\u76d6<\/td>\n<\/tr>\n<tr>\n<td>Sigmoid Belief Network<\/td>\n<td>Sigmoid\u4fe1\u5ff5\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Sigmoid Function<\/td>\n<td>Sigmoid\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Signed Distance<\/td>\n<td>\u5e26\u7b26\u53f7\u8ddd\u79bb<\/td>\n<\/tr>\n<tr>\n<td>Similarity Measure<\/td>\n<td>\u76f8\u4f3c\u5ea6\u5ea6\u91cf<\/td>\n<\/tr>\n<tr>\n<td>Simulated Annealing<\/td>\n<td>\u6a21\u62df\u9000\u706b<\/td>\n<\/tr>\n<tr>\n<td>Simultaneous Localization And Mapping<\/td>\n<td>\u5373\u65f6\u5b9a\u4f4d\u4e0e\u5730\u56fe\u6784\u5efa<\/td>\n<\/tr>\n<tr>\n<td>Singular Value<\/td>\n<td>\u5947\u5f02\u503c<\/td>\n<\/tr>\n<tr>\n<td>Singular Value Decomposition<\/td>\n<td>\u5947\u5f02\u503c\u5206\u89e3<\/td>\n<\/tr>\n<tr>\n<td>Skip-Gram Model<\/td>\n<td>\u8df3\u5143\u6a21\u578b<\/td>\n<\/tr>\n<tr>\n<td>Smoothing<\/td>\n<td>\u5e73\u6ed1<\/td>\n<\/tr>\n<tr>\n<td>Soft Margin<\/td>\n<td>\u8f6f\u95f4\u9694<\/td>\n<\/tr>\n<tr>\n<td>Soft Margin 
Maximization<\/td>\n<td>\u8f6f\u95f4\u9694\u6700\u5927\u5316<\/td>\n<\/tr>\n<tr>\n<td>Softmax<\/td>\n<td>Softmax\/\u8f6f\u6700\u5927\u5316<\/td>\n<\/tr>\n<tr>\n<td>Softmax Function<\/td>\n<td>Softmax\u51fd\u6570\/\u8f6f\u6700\u5927\u5316\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Softmax Regression<\/td>\n<td>Softmax\u56de\u5f52\/\u8f6f\u6700\u5927\u5316\u56de\u5f52<\/td>\n<\/tr>\n<tr>\n<td>Softplus Function<\/td>\n<td>Softplus\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Span<\/td>\n<td>\u5f20\u6210\u5b50\u7a7a\u95f4<\/td>\n<\/tr>\n<tr>\n<td>Sparse Coding<\/td>\n<td>\u7a00\u758f\u7f16\u7801<\/td>\n<\/tr>\n<tr>\n<td>Sparse Representation<\/td>\n<td>\u7a00\u758f\u8868\u793a<\/td>\n<\/tr>\n<tr>\n<td>Sparsity<\/td>\n<td>\u7a00\u758f\u6027<\/td>\n<\/tr>\n<tr>\n<td>Specialization<\/td>\n<td>\u7279\u5316<\/td>\n<\/tr>\n<tr>\n<td>Splitting Variable<\/td>\n<td>\u5207\u5206\u53d8\u91cf<\/td>\n<\/tr>\n<tr>\n<td>Squashing Function<\/td>\n<td>\u6324\u538b\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Standard Normal Distribution<\/td>\n<td>\u6807\u51c6\u6b63\u6001\u5206\u5e03<\/td>\n<\/tr>\n<tr>\n<td>State<\/td>\n<td>\u72b6\u6001<\/td>\n<\/tr>\n<tr>\n<td>State Value Function<\/td>\n<td>\u72b6\u6001\u503c\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>State-Action Value Function<\/td>\n<td>\u72b6\u6001-\u52a8\u4f5c\u503c\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Stationary Distribution<\/td>\n<td>\u5e73\u7a33\u5206\u5e03<\/td>\n<\/tr>\n<tr>\n<td>Stationary Point<\/td>\n<td>\u9a7b\u70b9<\/td>\n<\/tr>\n<tr>\n<td>Statistical Learning<\/td>\n<td>\u7edf\u8ba1\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Steepest Descent<\/td>\n<td>\u6700\u901f\u4e0b\u964d\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Stochastic Gradient Descent<\/td>\n<td>\u968f\u673a\u68af\u5ea6\u4e0b\u964d<\/td>\n<\/tr>\n<tr>\n<td>Stochastic Matrix<\/td>\n<td>\u968f\u673a\u77e9\u9635<\/td>\n<\/tr>\n<tr>\n<td>Stochastic Process<\/td>\n<td>\u968f\u673a\u8fc7\u7a0b<\/td>\n<\/tr>\n<tr>\n<td>Stratified 
Sampling<\/td>\n<td>\u5206\u5c42\u91c7\u6837<\/td>\n<\/tr>\n<tr>\n<td>Stride<\/td>\n<td>\u6b65\u5e45<\/td>\n<\/tr>\n<tr>\n<td>Structural Risk<\/td>\n<td>\u7ed3\u6784\u98ce\u9669<\/td>\n<\/tr>\n<tr>\n<td>Structural Risk Minimization<\/td>\n<td>\u7ed3\u6784\u98ce\u9669\u6700\u5c0f\u5316<\/td>\n<\/tr>\n<tr>\n<td>Subsample<\/td>\n<td>\u5b50\u91c7\u6837<\/td>\n<\/tr>\n<tr>\n<td>Subsampling<\/td>\n<td>\u4e0b\u91c7\u6837<\/td>\n<\/tr>\n<tr>\n<td>Subset Search<\/td>\n<td>\u5b50\u96c6\u641c\u7d22<\/td>\n<\/tr>\n<tr>\n<td>Subspace<\/td>\n<td>\u5b50\u7a7a\u95f4<\/td>\n<\/tr>\n<tr>\n<td>Supervised Learning<\/td>\n<td>\u76d1\u7763\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Support Vector<\/td>\n<td>\u652f\u6301\u5411\u91cf<\/td>\n<\/tr>\n<tr>\n<td>Support Vector Expansion<\/td>\n<td>\u652f\u6301\u5411\u91cf\u5c55\u5f0f<\/td>\n<\/tr>\n<tr>\n<td>Support Vector Machine<\/td>\n<td>\u652f\u6301\u5411\u91cf\u673a<\/td>\n<\/tr>\n<tr>\n<td>Surrogat Loss<\/td>\n<td>\u66ff\u4ee3\u635f\u5931<\/td>\n<\/tr>\n<tr>\n<td>Surrogate Function<\/td>\n<td>\u66ff\u4ee3\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Surrogate Loss Function<\/td>\n<td>\u4ee3\u7406\u635f\u5931\u51fd\u6570<\/td>\n<\/tr>\n<tr>\n<td>Symbolism<\/td>\n<td>\u7b26\u53f7\u4e3b\u4e49<\/td>\n<\/tr>\n<tr>\n<td>Tangent Propagation<\/td>\n<td>\u6b63\u5207\u4f20\u64ad<\/td>\n<\/tr>\n<tr>\n<td>Teacher Forcing<\/td>\n<td>\u5f3a\u5236\u6559\u5b66<\/td>\n<\/tr>\n<tr>\n<td>Temporal-Difference Learning<\/td>\n<td>\u65f6\u5e8f\u5dee\u5206\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Tensor<\/td>\n<td>\u5f20\u91cf<\/td>\n<\/tr>\n<tr>\n<td>Test Error<\/td>\n<td>\u6d4b\u8bd5\u8bef\u5dee<\/td>\n<\/tr>\n<tr>\n<td>Test Sample<\/td>\n<td>\u6d4b\u8bd5\u6837\u672c<\/td>\n<\/tr>\n<tr>\n<td>Test Set<\/td>\n<td>\u6d4b\u8bd5\u96c6<\/td>\n<\/tr>\n<tr>\n<td>Threshold<\/td>\n<td>\u9608\u503c<\/td>\n<\/tr>\n<tr>\n<td>Threshold Logic 
Unit<\/td>\n<td>\u9608\u503c\u903b\u8f91\u5355\u5143<\/td>\n<\/tr>\n<tr>\n<td>Threshold-Moving<\/td>\n<td>\u9608\u503c\u79fb\u52a8<\/td>\n<\/tr>\n<tr>\n<td>Tied Weight<\/td>\n<td>\u6346\u7ed1\u6743\u91cd<\/td>\n<\/tr>\n<tr>\n<td>Tikhonov Regularization<\/td>\n<td>Tikhonov\u6b63\u5219\u5316<\/td>\n<\/tr>\n<tr>\n<td>Time Delay Neural Network<\/td>\n<td>\u65f6\u5ef6\u795e\u7ecf\u7f51\u7edc<\/td>\n<\/tr>\n<tr>\n<td>Time Homogenous Markov Chain<\/td>\n<td>\u65f6\u95f4\u9f50\u6b21\u9a6c\u5c14\u53ef\u592b\u94fe<\/td>\n<\/tr>\n<tr>\n<td>Time Step<\/td>\n<td>\u65f6\u95f4\u6b65<\/td>\n<\/tr>\n<tr>\n<td>Token<\/td>\n<td>\u8bcd\u5143<\/td>\n<\/tr>\n<tr>\n<td>Token<\/td>\n<td>\u8bcd\u5143<\/td>\n<\/tr>\n<tr>\n<td>Tokenization<\/td>\n<td>\u8bcd\u5143\u5316<\/td>\n<\/tr>\n<tr>\n<td>Tokenizer<\/td>\n<td>\u8bcd\u5143\u5206\u6790\u5668<\/td>\n<\/tr>\n<tr>\n<td>Topic Model<\/td>\n<td>\u8bdd\u9898\u6a21\u578b<\/td>\n<\/tr>\n<tr>\n<td>Topic Modeling<\/td>\n<td>\u8bdd\u9898\u5206\u6790<\/td>\n<\/tr>\n<tr>\n<td>Trace<\/td>\n<td>\u8ff9<\/td>\n<\/tr>\n<tr>\n<td>Training<\/td>\n<td>\u8bad\u7ec3<\/td>\n<\/tr>\n<tr>\n<td>Training Error<\/td>\n<td>\u8bad\u7ec3\u8bef\u5dee<\/td>\n<\/tr>\n<tr>\n<td>Training Sample<\/td>\n<td>\u8bad\u7ec3\u6837\u672c<\/td>\n<\/tr>\n<tr>\n<td>Training Set<\/td>\n<td>\u8bad\u7ec3\u96c6<\/td>\n<\/tr>\n<tr>\n<td>Transductive Learning<\/td>\n<td>\u76f4\u63a8\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Transductive Transfer Learning<\/td>\n<td>\u76f4\u63a8\u8fc1\u79fb\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Transfer Learning<\/td>\n<td>\u8fc1\u79fb\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Transformer<\/td>\n<td>Transformer<\/td>\n<\/tr>\n<tr>\n<td>Transformer Model<\/td>\n<td>Transformer\u6a21\u578b<\/td>\n<\/tr>\n<tr>\n<td>Transpose<\/td>\n<td>\u8f6c\u7f6e<\/td>\n<\/tr>\n<tr>\n<td>Transposed Convolution<\/td>\n<td>\u8f6c\u7f6e\u5377\u79ef<\/td>\n<\/tr>\n<tr>\n<td>Trial And 
Error<\/td>\n<td>\u8bd5\u9519<\/td>\n<\/tr>\n<tr>\n<td>Trigram<\/td>\n<td>\u4e09\u5143\u8bed\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Turing Machine<\/td>\n<td>\u56fe\u7075\u673a<\/td>\n<\/tr>\n<tr>\n<td>Underfitting<\/td>\n<td>\u6b20\u62df\u5408<\/td>\n<\/tr>\n<tr>\n<td>Undersampling<\/td>\n<td>\u6b20\u91c7\u6837<\/td>\n<\/tr>\n<tr>\n<td>Undirected Graphical Model<\/td>\n<td>\u65e0\u5411\u56fe\u6a21\u578b<\/td>\n<\/tr>\n<tr>\n<td>Uniform Distribution<\/td>\n<td>\u5747\u5300\u5206\u5e03<\/td>\n<\/tr>\n<tr>\n<td>Unigram<\/td>\n<td>\u4e00\u5143\u8bed\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Unit<\/td>\n<td>\u5355\u5143<\/td>\n<\/tr>\n<tr>\n<td>Universal Approximation Theorem<\/td>\n<td>\u901a\u7528\u8fd1\u4f3c\u5b9a\u7406<\/td>\n<\/tr>\n<tr>\n<td>Universal Approximator<\/td>\n<td>\u901a\u7528\u8fd1\u4f3c\u5668<\/td>\n<\/tr>\n<tr>\n<td>Universal Function Approximator<\/td>\n<td>\u901a\u7528\u51fd\u6570\u8fd1\u4f3c\u5668<\/td>\n<\/tr>\n<tr>\n<td>Unknown Token<\/td>\n<td>\u672a\u77e5\u8bcd\u5143<\/td>\n<\/tr>\n<tr>\n<td>Unsupervised Layer-Wise Training<\/td>\n<td>\u65e0\u76d1\u7763\u9010\u5c42\u8bad\u7ec3<\/td>\n<\/tr>\n<tr>\n<td>Unsupervised Learning<\/td>\n<td>\u65e0\u76d1\u7763\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Update Gate<\/td>\n<td>\u66f4\u65b0\u95e8<\/td>\n<\/tr>\n<tr>\n<td>Upsampling<\/td>\n<td>\u4e0a\u91c7\u6837<\/td>\n<\/tr>\n<tr>\n<td>V-Structure<\/td>\n<td>V\u578b\u7ed3\u6784<\/td>\n<\/tr>\n<tr>\n<td>Validation Set<\/td>\n<td>\u9a8c\u8bc1\u96c6<\/td>\n<\/tr>\n<tr>\n<td>Validity Index<\/td>\n<td>\u6709\u6548\u6027\u6307\u6807<\/td>\n<\/tr>\n<tr>\n<td>Value Function Approximation<\/td>\n<td>\u503c\u51fd\u6570\u8fd1\u4f3c<\/td>\n<\/tr>\n<tr>\n<td>Value Iteration<\/td>\n<td>\u503c\u8fed\u4ee3<\/td>\n<\/tr>\n<tr>\n<td>Vanishing Gradient Problem<\/td>\n<td>\u68af\u5ea6\u6d88\u5931\u95ee\u9898<\/td>\n<\/tr>\n<tr>\n<td>Vapnik-Chervonenkis Dimension<\/td>\n<td>VC\u7ef4<\/td>\n<\/tr>\n<tr>\n<td>Variable 
Elimination<\/td>\n<td>\u53d8\u91cf\u6d88\u53bb<\/td>\n<\/tr>\n<tr>\n<td>Variance<\/td>\n<td>\u65b9\u5dee<\/td>\n<\/tr>\n<tr>\n<td>Variational Autoencoder<\/td>\n<td>\u53d8\u5206\u81ea\u7f16\u7801\u5668<\/td>\n<\/tr>\n<tr>\n<td>Variational Inference<\/td>\n<td>\u53d8\u5206\u63a8\u65ad<\/td>\n<\/tr>\n<tr>\n<td>Vector<\/td>\n<td>\u5411\u91cf<\/td>\n<\/tr>\n<tr>\n<td>Vector Space Model<\/td>\n<td>\u5411\u91cf\u7a7a\u95f4\u6a21\u578b<\/td>\n<\/tr>\n<tr>\n<td>Version Space<\/td>\n<td>\u7248\u672c\u7a7a\u95f4<\/td>\n<\/tr>\n<tr>\n<td>Viterbi Algorithm<\/td>\n<td>\u7ef4\u7279\u6bd4\u7b97\u6cd5<\/td>\n<\/tr>\n<tr>\n<td>Vocabulary<\/td>\n<td>\u8bcd\u8868<\/td>\n<\/tr>\n<tr>\n<td>Warp<\/td>\n<td>\u7ebf\u7a0b\u675f<\/td>\n<\/tr>\n<tr>\n<td>Weak Learner<\/td>\n<td>\u5f31\u5b66\u4e60\u5668<\/td>\n<\/tr>\n<tr>\n<td>Weakly Supervised Learning<\/td>\n<td>\u5f31\u76d1\u7763\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Weight<\/td>\n<td>\u6743\u91cd<\/td>\n<\/tr>\n<tr>\n<td>Weight Decay<\/td>\n<td>\u6743\u91cd\u8870\u51cf<\/td>\n<\/tr>\n<tr>\n<td>Weight Sharing<\/td>\n<td>\u6743\u5171\u4eab<\/td>\n<\/tr>\n<tr>\n<td>Weighted Voting<\/td>\n<td>\u52a0\u6743\u6295\u7968<\/td>\n<\/tr>\n<tr>\n<td>Whitening<\/td>\n<td>\u767d\u5316<\/td>\n<\/tr>\n<tr>\n<td>Winner-Take-All<\/td>\n<td>\u80dc\u8005\u901a\u5403<\/td>\n<\/tr>\n<tr>\n<td>Within-Class Scatter Matrix<\/td>\n<td>\u7c7b\u5185\u6563\u5ea6\u77e9\u9635<\/td>\n<\/tr>\n<tr>\n<td>Word Embedding<\/td>\n<td>\u8bcd\u5d4c\u5165<\/td>\n<\/tr>\n<tr>\n<td>Word Sense Disambiguation<\/td>\n<td>\u8bcd\u4e49\u6d88\u6b67<\/td>\n<\/tr>\n<tr>\n<td>Word Vector<\/td>\n<td>\u8bcd\u5411\u91cf<\/td>\n<\/tr>\n<tr>\n<td>Zero Padding<\/td>\n<td>\u96f6\u586b\u5145<\/td>\n<\/tr>\n<tr>\n<td>Zero-Shot Learning<\/td>\n<td>\u96f6\u8bd5\u5b66\u4e60<\/td>\n<\/tr>\n<tr>\n<td>Zipf&#8217;s Law<\/td>\n<td>\u9f50\u666e\u592b\u5b9a\u5f8b<\/td>\n<\/tr>\n<\/tbody>\n<\/table>\n<\/section>\n<\/section>\n<section data-color=\"rgb(172, 29, 16)\" data-custom=\"rgb(172, 29, 
16)\">\n<section><strong>\u7248\u6743\u58f0\u660e<\/strong><\/p>\n<\/section>\n<\/section>\n<div class=\"pvc_clear\"><\/div>\n<p id=\"pvc_stats_1228\" class=\"pvc_stats total_only  \" data-element-id=\"1228\" style=\"\"><i class=\"pvc-stats-icon medium\" aria-hidden=\"true\"><svg xmlns=\"http:\/\/www.w3.org\/2000\/svg\" version=\"1.0\" viewBox=\"0 0 502 315\" preserveAspectRatio=\"xMidYMid meet\"><g transform=\"translate(0,332) scale(0.1,-0.1)\" fill=\"\" stroke=\"none\"><path d=\"M2394 3279 l-29 -30 -3 -207 c-2 -182 0 -211 15 -242 39 -76 157 -76 196 0 15 31 17 60 15 243 l-3 209 -33 29 c-26 23 -41 29 -80 29 -41 0 -53 -5 -78 -31z\"\/><path d=\"M3085 3251 c-45 -19 -58 -50 -96 -229 -47 -217 -49 -260 -13 -295 52 -53 146 -42 177 20 16 31 87 366 87 410 0 70 -86 122 -155 94z\"\/><path d=\"M1751 3234 c-13 -9 -29 -31 -37 -50 -12 -29 -10 -49 21 -204 19 -94 39 -189 45 -210 14 -50 54 -80 110 -80 34 0 48 6 76 34 21 21 34 44 34 59 0 14 -18 113 -40 219 -37 178 -43 195 -70 221 -36 32 -101 37 -139 11z\"\/><path d=\"M1163 3073 c-36 -7 -73 -59 -73 -102 0 -56 133 -378 171 -413 34 -32 83 -37 129 -13 70 36 67 87 -16 290 -86 209 -89 214 -129 231 -35 14 -42 15 -82 7z\"\/><path d=\"M3689 3066 c-15 -9 -33 -30 -42 -48 -48 -103 -147 -355 -147 -375 0 -98 131 -148 192 -74 13 15 57 108 97 206 80 196 84 226 37 273 -30 30 -99 39 -137 18z\"\/><path d=\"M583 2784 c-38 -19 -67 -74 -58 -113 9 -42 211 -354 242 -373 16 -10 45 -18 66 -18 51 0 107 52 107 100 0 39 -1 41 -124 234 -80 126 -108 162 -133 173 -41 17 -61 16 -100 -3z\"\/><path d=\"M4250 2784 c-14 -9 -74 -91 -133 -183 -95 -150 -107 -173 -107 -213 0 -55 33 -94 87 -104 67 -13 90 8 211 198 130 202 137 225 78 284 -27 27 -42 34 -72 34 -22 0 -50 -8 -64 -16z\"\/><path d=\"M2275 2693 c-553 -48 -1095 -270 -1585 -649 -135 -104 -459 -423 -483 -476 -23 -49 -22 -139 2 -186 73 -142 361 -457 571 -626 285 -228 642 -407 990 -497 242 -63 336 -73 660 -74 310 0 370 5 595 52 535 111 1045 392 1455 803 122 121 250 273 275 326 19 41 19 137 0 174 -41 79 -309 363 -465 492 
-447 370 -946 591 -1479 653 -113 14 -422 18 -536 8z m395 -428 c171 -34 330 -124 456 -258 112 -119 167 -219 211 -378 27 -96 24 -300 -5 -401 -72 -255 -236 -447 -474 -557 -132 -62 -201 -76 -368 -76 -167 0 -236 14 -368 76 -213 98 -373 271 -451 485 -162 444 86 934 547 1084 153 49 292 57 452 25z m909 -232 c222 -123 408 -262 593 -441 76 -74 138 -139 138 -144 0 -16 -233 -242 -330 -319 -155 -123 -309 -223 -461 -299 l-81 -41 32 46 c18 26 49 83 70 128 143 306 141 649 -6 957 -25 52 -61 116 -79 142 l-34 47 45 -20 c26 -10 76 -36 113 -56z m-2057 25 c-40 -58 -105 -190 -130 -263 -110 -324 -59 -707 132 -981 25 -35 42 -64 37 -64 -19 0 -241 119 -326 174 -188 122 -406 314 -532 468 l-58 71 108 103 c185 178 428 349 672 473 66 33 121 60 123 61 2 0 -10 -19 -26 -42z\"\/><path d=\"M2375 1950 c-198 -44 -350 -190 -395 -379 -18 -76 -8 -221 19 -290 114 -284 457 -406 731 -260 98 52 188 154 231 260 27 69 37 214 19 290 -38 163 -166 304 -326 360 -67 23 -215 33 -279 19z\"\/><\/g><\/svg><\/i> <img loading=\"lazy\" decoding=\"async\" width=\"16\" height=\"16\" alt=\"Loading\" src=\"https:\/\/aif.amtbbs.org\/wp-content\/plugins\/page-views-count\/ajax-loader-2x.gif\" border=0 \/><\/p>\n<div class=\"pvc_clear\"><\/div>\n","protected":false},"excerpt":{"rendered":"<p>\u4f60\u597d\uff0c\u6211\u662fzhenguo \u8fd9\u662f\u51e0\u4f4d\u673a\u5668\u5b66\u4e60\u6743\u5a01\u4e13\u5bb6\u6c47\u603b\u7684725\u4e2a\u673a\u5668\u5b66\u4e60\u672f\u8bed\u8868\uff0c\u975e\u5e38\u5168\u9762\u4e86\uff0c\u503c\u5f97\u6536\u85cf\uff01 \u82f1\u6587\u672f [&hellip;]<\/p>\n<div class=\"pvc_clear\"><\/div>\n<p id=\"pvc_stats_1228\" class=\"pvc_stats total_only  \" data-element-id=\"1228\" style=\"\"><i class=\"pvc-stats-icon medium\" aria-hidden=\"true\"><svg xmlns=\"http:\/\/www.w3.org\/2000\/svg\" version=\"1.0\" viewBox=\"0 0 502 315\" preserveAspectRatio=\"xMidYMid meet\"><g transform=\"translate(0,332) scale(0.1,-0.1)\" fill=\"\" stroke=\"none\"><path d=\"M2394 3279 l-29 -30 -3 -207 c-2 -182 0 -211 15 -242 39 -76 157 -76 196 
0 15 31 17 60 15 243 l-3 209 -33 29 c-26 23 -41 29 -80 29 -41 0 -53 -5 -78 -31z\"\/><path d=\"M3085 3251 c-45 -19 -58 -50 -96 -229 -47 -217 -49 -260 -13 -295 52 -53 146 -42 177 20 16 31 87 366 87 410 0 70 -86 122 -155 94z\"\/><path d=\"M1751 3234 c-13 -9 -29 -31 -37 -50 -12 -29 -10 -49 21 -204 19 -94 39 -189 45 -210 14 -50 54 -80 110 -80 34 0 48 6 76 34 21 21 34 44 34 59 0 14 -18 113 -40 219 -37 178 -43 195 -70 221 -36 32 -101 37 -139 11z\"\/><path d=\"M1163 3073 c-36 -7 -73 -59 -73 -102 0 -56 133 -378 171 -413 34 -32 83 -37 129 -13 70 36 67 87 -16 290 -86 209 -89 214 -129 231 -35 14 -42 15 -82 7z\"\/><path d=\"M3689 3066 c-15 -9 -33 -30 -42 -48 -48 -103 -147 -355 -147 -375 0 -98 131 -148 192 -74 13 15 57 108 97 206 80 196 84 226 37 273 -30 30 -99 39 -137 18z\"\/><path d=\"M583 2784 c-38 -19 -67 -74 -58 -113 9 -42 211 -354 242 -373 16 -10 45 -18 66 -18 51 0 107 52 107 100 0 39 -1 41 -124 234 -80 126 -108 162 -133 173 -41 17 -61 16 -100 -3z\"\/><path d=\"M4250 2784 c-14 -9 -74 -91 -133 -183 -95 -150 -107 -173 -107 -213 0 -55 33 -94 87 -104 67 -13 90 8 211 198 130 202 137 225 78 284 -27 27 -42 34 -72 34 -22 0 -50 -8 -64 -16z\"\/><path d=\"M2275 2693 c-553 -48 -1095 -270 -1585 -649 -135 -104 -459 -423 -483 -476 -23 -49 -22 -139 2 -186 73 -142 361 -457 571 -626 285 -228 642 -407 990 -497 242 -63 336 -73 660 -74 310 0 370 5 595 52 535 111 1045 392 1455 803 122 121 250 273 275 326 19 41 19 137 0 174 -41 79 -309 363 -465 492 -447 370 -946 591 -1479 653 -113 14 -422 18 -536 8z m395 -428 c171 -34 330 -124 456 -258 112 -119 167 -219 211 -378 27 -96 24 -300 -5 -401 -72 -255 -236 -447 -474 -557 -132 -62 -201 -76 -368 -76 -167 0 -236 14 -368 76 -213 98 -373 271 -451 485 -162 444 86 934 547 1084 153 49 292 57 452 25z m909 -232 c222 -123 408 -262 593 -441 76 -74 138 -139 138 -144 0 -16 -233 -242 -330 -319 -155 -123 -309 -223 -461 -299 l-81 -41 32 46 c18 26 49 83 70 128 143 306 141 649 -6 957 -25 52 -61 116 -79 142 l-34 47 45 -20 c26 -10 76 -36 113 -56z m-2057 25 c-40 -58 -105 
-190 -130 -263 -110 -324 -59 -707 132 -981 25 -35 42 -64 37 -64 -19 0 -241 119 -326 174 -188 122 -406 314 -532 468 l-58 71 108 103 c185 178 428 349 672 473 66 33 121 60 123 61 2 0 -10 -19 -26 -42z\"\/><path d=\"M2375 1950 c-198 -44 -350 -190 -395 -379 -18 -76 -8 -221 19 -290 114 -284 457 -406 731 -260 98 52 188 154 231 260 27 69 37 214 19 290 -38 163 -166 304 -326 360 -67 23 -215 33 -279 19z\"\/><\/g><\/svg><\/i> <img loading=\"lazy\" decoding=\"async\" width=\"16\" height=\"16\" alt=\"Loading\" src=\"https:\/\/aif.amtbbs.org\/wp-content\/plugins\/page-views-count\/ajax-loader-2x.gif\" border=0 \/><\/p>\n<div class=\"pvc_clear\"><\/div>\n","protected":false},"author":1,"featured_media":0,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"footnotes":""},"categories":[3],"tags":[],"class_list":["post-1228","post","type-post","status-publish","format-standard","hentry","category-ai"],"_links":{"self":[{"href":"https:\/\/aif.amtbbs.org\/index.php\/wp-json\/wp\/v2\/posts\/1228","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/aif.amtbbs.org\/index.php\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/aif.amtbbs.org\/index.php\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/aif.amtbbs.org\/index.php\/wp-json\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/aif.amtbbs.org\/index.php\/wp-json\/wp\/v2\/comments?post=1228"}],"version-history":[{"count":1,"href":"https:\/\/aif.amtbbs.org\/index.php\/wp-json\/wp\/v2\/posts\/1228\/revisions"}],"predecessor-version":[{"id":1229,"href":"https:\/\/aif.amtbbs.org\/index.php\/wp-json\/wp\/v2\/posts\/1228\/revisions\/1229"}],"wp:attachment":[{"href":"https:\/\/aif.amtbbs.org\/index.php\/wp-json\/wp\/v2\/media?parent=1228"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/aif.amtbbs.org\/index.php\/wp-json\/wp\/v2\/categories?post=1228"},{"taxonomy":"post_tag","embeddable":true,"href":"htt
ps:\/\/aif.amtbbs.org\/index.php\/wp-json\/wp\/v2\/tags?post=1228"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}