# mirror of https://github.com/meshtastic/firmware.git (synced 2025-10-28 23:34:03 +00:00)
name: Tests

# DISABLED: Changed from automatic PR triggers to manual only
on:
  workflow_dispatch:
    inputs:
      reason:
        description: "Reason for manual test run"
        required: false
        default: "Manual test execution"

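# Cancels an in-flight run when a newer one starts for the same PR branch.
# github.head_ref is only set on pull_request events, so manually dispatched
# runs group by the unique run_id and never cancel each other.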
concurrency:
  group: tests-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

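# Token scopes for the whole workflow: write access is limited to publishing
# check results and PR comments; everything else stays read-only.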
permissions:
  contents: read
  actions: read
  checks: write
  pull-requests: write

jobs:
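  # Delegates to the reusable test_native.yml workflow; the repository guard
  # keeps forks and mirrors from running the suite.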
  native-tests:
    name: "🧪 Native Tests"
    if: github.repository == 'meshtastic/firmware'
    uses: ./.github/workflows/test_native.yml
    permissions:
      contents: read
      actions: read
      checks: write

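  # Aggregation job: if: always() makes it run even when native-tests fails or
  # is cancelled, so a summary is still produced.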
  test-summary:
    name: "📊 Test Results"
    runs-on: ubuntu-latest
    needs: [native-tests]
    if: always()
    permissions:
      contents: read
      actions: read
      checks: write
      pull-requests: write
    steps:
      - uses: actions/checkout@v5
        with:
          submodules: recursive

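      # buildinfo.py prints the long version string; exposing it as a step
      # output lets the artifact name below reference it, and it is expected to
      # match the report name uploaded by test_native.yml.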
      - name: Get release version string
        run: echo "long=$(./bin/buildinfo.py long)" >> $GITHUB_OUTPUT
        id: version

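      # Fetch the JUnit report produced by the test job; a skipped run uploads
      # nothing, hence the guard on the job result.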
      - name: Download test artifacts
        if: needs.native-tests.result != 'skipped'
        uses: actions/download-artifact@v5
        with:
          name: platformio-test-report-${{ steps.version.outputs.long }}.zip
          merge-multiple: true

      - name: Parse test results and create detailed summary
        id: test-results
        run: |
          echo "## 🧪 Test Results Summary" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY

          # Check overall job status first
          if [[ "${{ needs.native-tests.result }}" == "success" ]]; then
            echo "✅ **Overall Status**: PASSED" >> $GITHUB_STEP_SUMMARY
          elif [[ "${{ needs.native-tests.result }}" == "failure" ]]; then
            echo "❌ **Overall Status**: FAILED" >> $GITHUB_STEP_SUMMARY
          elif [[ "${{ needs.native-tests.result }}" == "cancelled" ]]; then
            echo "⏸️ **Overall Status**: CANCELLED" >> $GITHUB_STEP_SUMMARY
            echo "" >> $GITHUB_STEP_SUMMARY
            echo "Tests were cancelled before completion." >> $GITHUB_STEP_SUMMARY
            exit 0
          else
            echo "⚠️ **Overall Status**: SKIPPED" >> $GITHUB_STEP_SUMMARY
            echo "" >> $GITHUB_STEP_SUMMARY
            echo "Tests were skipped." >> $GITHUB_STEP_SUMMARY
            exit 0
          fi

          echo "" >> $GITHUB_STEP_SUMMARY

          # Parse detailed test results if available
          if [ -f "testreport.xml" ]; then
            echo "### 🔍 Individual Test Results" >> $GITHUB_STEP_SUMMARY
            echo "" >> $GITHUB_STEP_SUMMARY

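            # Quoted 'EOF' keeps the shell from expanding anything inside the
            # Python body; the script's stdout is appended to the step summary.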
            python3 << 'EOF' >> $GITHUB_STEP_SUMMARY
          import xml.etree.ElementTree as ET
          import os

          try:
              tree = ET.parse('testreport.xml')
              root = tree.getroot()

              total_tests = 0
              passed_tests = 0
              failed_tests = 0
              skipped_tests = 0

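              # JUnit XML: each <testsuite> carries aggregate counts, where
              # "failures" are assertion failures and "errors" are crashes.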
              # Parse testsuite elements
              for testsuite in root.findall('.//testsuite'):
                  suite_name = testsuite.get('name', 'Unknown')
                  suite_tests = int(testsuite.get('tests', '0'))
                  suite_failures = int(testsuite.get('failures', '0'))
                  suite_errors = int(testsuite.get('errors', '0'))
                  suite_skipped = int(testsuite.get('skipped', '0'))

                  total_tests += suite_tests
                  failed_tests += suite_failures + suite_errors
                  skipped_tests += suite_skipped
                  passed_tests += suite_tests - suite_failures - suite_errors - suite_skipped

                  if suite_tests > 0:
                      status = "✅" if (suite_failures + suite_errors) == 0 else "❌"
                      print(f"**{status} Test Suite: {suite_name}**")
                      print(f"- Total: {suite_tests}")
                      print(f"- Passed: ✅ {suite_tests - suite_failures - suite_errors - suite_skipped}")
                      print(f"- Failed: ❌ {suite_failures + suite_errors}")
                      if suite_skipped > 0:
                          print(f"- Skipped: ⏭️ {suite_skipped}")
                      print("")

                      # Show individual test results for failed suites
                      if suite_failures + suite_errors > 0:
                          print("**Failed Tests:**")
                          for testcase in testsuite.findall('testcase'):
                              test_name = testcase.get('name', 'Unknown')
                              failure = testcase.find('failure')
                              error = testcase.find('error')

                              if failure is not None:
                                  msg = failure.get('message', 'Unknown error')[:100]
                                  print(f"- ❌ `{test_name}`: {msg}")
                              elif error is not None:
                                  msg = error.get('message', 'Unknown error')[:100]
                                  print(f"- ❌ `{test_name}`: ERROR - {msg}")
                          print("")
                      else:
                          # Show passed tests for successful suites
                          passed_count = 0
                          for testcase in testsuite.findall('testcase'):
                              if testcase.find('failure') is None and testcase.find('error') is None:
                                  if passed_count < 5:  # Limit to first 5 to avoid spam
                                      test_name = testcase.get('name', 'Unknown')
                                      print(f"- ✅ `{test_name}`: PASSED")
                                  passed_count += 1
                          if passed_count > 5:
                              print(f"- ... and {passed_count - 5} more tests passed")
                          print("")

              # Summary statistics
              print("### 📊 Test Statistics")
              print(f"- **Total Tests**: {total_tests}")
              print(f"- **Passed**: ✅ {passed_tests}")
              print(f"- **Failed**: ❌ {failed_tests}")
              if skipped_tests > 0:
                  print(f"- **Skipped**: ⏭️ {skipped_tests}")

              if failed_tests > 0:
                  print(f"\n❌ **{failed_tests} tests failed out of {total_tests} total**")
              else:
                  print(f"\n✅ **All {total_tests} tests passed!**")

          except Exception as e:
              print(f"❌ Error parsing test results: {e}")
          EOF
          else
            echo "⚠️ **No detailed test report available**" >> $GITHUB_STEP_SUMMARY
            echo "" >> $GITHUB_STEP_SUMMARY
            echo "Test artifacts may not have been generated properly." >> $GITHUB_STEP_SUMMARY
          fi

          echo "" >> $GITHUB_STEP_SUMMARY
          echo "---" >> $GITHUB_STEP_SUMMARY
          echo "View detailed logs in the [Actions tab](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})" >> $GITHUB_STEP_SUMMARY

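      # Holdover from the automatic PR triggers: with only workflow_dispatch
      # enabled above, github.event_name is never 'pull_request', so this step
      # is currently skipped.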
      - name: Comment test results on PR
        if: github.event_name == 'pull_request' && needs.native-tests.result != 'skipped'
        uses: actions/github-script@v8
        with:
          script: |
            // Build the comment body from the job result
            let testSummary = "## 🧪 Test Results Summary\n\n";

            if ("${{ needs.native-tests.result }}" === "success") {
              testSummary += "✅ **All tests passed!**\n\n";
            } else if ("${{ needs.native-tests.result }}" === "failure") {
              testSummary += "❌ **Some tests failed.**\n\n";
            } else {
              testSummary += "⚠️ **Tests did not complete normally.**\n\n";
            }

            testSummary += `View detailed results: [Actions Run](${context.payload.repository.html_url}/actions/runs/${context.runId})\n\n`;
            testSummary += "---\n";
            testSummary += "*This comment will be automatically updated when new commits are pushed.*";

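            // The heading doubles as a marker so the job can update its own
            // earlier comment; note listComments returns only the first page
            // (30 comments by default), which is usually enough to find it.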
            // Find existing comment
            const comments = await github.rest.issues.listComments({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number
            });

            const botComment = comments.data.find(comment =>
              comment.user.type === 'Bot' &&
              comment.body.includes('🧪 Test Results Summary')
            );

            if (botComment) {
              // Update existing comment
              await github.rest.issues.updateComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                comment_id: botComment.id,
                body: testSummary
              });
            } else {
              // Create new comment
              await github.rest.issues.createComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: context.issue.number,
                body: testSummary
              });
            }

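      # Final gate: fail the job unless the test job itself succeeded, so the
      # workflow run's status reflects the test outcome.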
      - name: Set overall status
        run: |
          if [[ "${{ needs.native-tests.result }}" == "success" ]]; then
            echo "All tests passed! ✅"
            exit 0
          else
            echo "Some tests failed! ❌"
            exit 1
          fi