Compare commits: android-12...android-13 (67 commits)
| SHA1 |
|---|
| f5dc5be691 |
| 40d4e9543b |
| 875246f5b2 |
| b16fefa106 |
| 2a255b2d61 |
| 2f9487cd38 |
| edce713fc9 |
| f75363177e |
| 4c6217f09b |
| c95f35ea85 |
| 40357098a2 |
| e7f4110791 |
| ca1dd1862b |
| 737d1cea62 |
| 2f8e237ab7 |
| 5191465b0a |
| 50c604f37f |
| dfbc22c291 |
| a5a3167eba |
| a423e0f9e0 |
| 511c1f0c8b |
| 8369fcd71a |
| 626916e9a4 |
| 507f360a81 |
| 5323d9f6b3 |
| 770d4b0b72 |
| e5fed31009 |
| f07484bc64 |
| 78b9956a04 |
| 90aa937593 |
| 940618a64d |
| 409fa5dda2 |
| 211b67668d |
| f0cd02b9bd |
| 34101d8c5e |
| bf8d7bc0da |
| 9543adf072 |
| 036d2686af |
| a80e0e7da5 |
| 9631dedea9 |
| 75de0cadcf |
| 4b321c003c |
| 0a83047368 |
| 9bb8ac7cb6 |
| d6e6ab11b1 |
| b3a1f793c3 |
| a294beb116 |
| eda403388a |
| 3032980478 |
| 7f96f4db3f |
| a0f9a3ab5b |
| b36fec486e |
| 57cf830862 |
| 41701052d3 |
| b0c6bf497a |
| 6a7123826a |
| 6513a356f0 |
| 65d4a16afd |
| ca75c58f43 |
| 723df0f368 |
| 94b7ac50bb |
| 18450ebd78 |
| efdb2e8f3d |
| 7a84a1a974 |
| 789d9c8af9 |
| 4df063209b |
| 6256e3ca8e |
.git-blame-ignore-revs (new file, 5 lines)
@@ -0,0 +1,5 @@
+# SPDX-FileCopyrightText: 2023 yuzu Emulator Project
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+# CRLF -> LF
+90aa937593e53a5d5e070fb623b228578b0b225f
.github/workflows/android-build.yml (vendored, 2 changed lines)
@@ -40,11 +40,11 @@ jobs:
 sudo apt-get install -y ccache apksigner glslang-dev glslang-tools
 - name: Build
 run: ./.ci/scripts/android/build.sh
-- name: Copy and sign artifacts
 env:
 ANDROID_KEYSTORE_B64: ${{ secrets.ANDROID_KEYSTORE_B64 }}
 ANDROID_KEY_ALIAS: ${{ secrets.ANDROID_KEY_ALIAS }}
 ANDROID_KEYSTORE_PASS: ${{ secrets.ANDROID_KEYSTORE_PASS }}
+- name: Copy artifacts
 run: ./.ci/scripts/android/upload.sh
 - name: Upload
 uses: actions/upload-artifact@v3
@@ -1,3 +1,11 @@
+| Pull Request | Commit | Title | Author | Merged? |
+|----|----|----|----|----|
+
+
+End of merge log. You can find the original README.md below the break.
+
+-----
+
 <!--
 SPDX-FileCopyrightText: 2018 yuzu Emulator Project
 SPDX-License-Identifier: GPL-2.0-or-later
@@ -21,7 +21,7 @@ if (MSVC)
 # Avoid windows.h from including some usually unused libs like winsocks.h, since this might cause some redefinition errors.
 add_definitions(-DWIN32_LEAN_AND_MEAN)
 
-# Ensure that projects build with Unicode support.
+# Ensure that projects are built with Unicode support.
 add_definitions(-DUNICODE -D_UNICODE)
 
 # /W4 - Level 4 warnings
@@ -54,11 +54,11 @@ if (MSVC)
 /GT
 
 # Modules
-/experimental:module- # Disable module support explicitly due to conflicts with precompiled headers
+/experimental:module- # Explicitly disable module support due to conflicts with precompiled headers.
 
 # External headers diagnostics
 /external:anglebrackets # Treats all headers included by #include <header>, where the header file is enclosed in angle brackets (< >), as external headers
-/external:W0 # Sets the default warning level to 0 for external headers, effectively turning off warnings for external headers
+/external:W0 # Sets the default warning level to 0 for external headers, effectively disabling warnings for them.
 
 # Warnings
 /W4
@@ -252,7 +252,7 @@ object NativeLibrary {
 
 external fun reloadKeys(): Boolean
 
-external fun initializeSystem()
+external fun initializeSystem(reload: Boolean)
 
 external fun defaultCPUCore(): Int
 
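For orientation, the hunk above changes the JNI binding so callers can tell a first boot apart from a re-initialization. A minimal Kotlin sketch of the calling convention, using hypothetical wrapper names that are not part of this diff:

```kotlin
// Illustrative only; NativeSystem, bootSystem and reinitializeSystem are made-up names.
object NativeSystem {
    // Mirrors the changed binding shown above.
    external fun initializeSystem(reload: Boolean)
}

// First launch: run one-time setup (logging, filesystem, ...) plus system init.
fun bootSystem() = NativeSystem.initializeSystem(false)

// After importing firmware or user data: rebuild system state, skip one-time setup.
fun reinitializeSystem() = NativeSystem.initializeSystem(true)
```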
@@ -11,6 +11,7 @@ import java.io.File
 import org.yuzu.yuzu_emu.utils.DirectoryInitialization
 import org.yuzu.yuzu_emu.utils.DocumentsTree
 import org.yuzu.yuzu_emu.utils.GpuDriverHelper
+import org.yuzu.yuzu_emu.utils.Log
 
 fun Context.getPublicFilesDir(): File = getExternalFilesDir(null) ?: filesDir
 
@@ -49,6 +50,7 @@ class YuzuApplication : Application() {
 DirectoryInitialization.start()
 GpuDriverHelper.initializeDriverParameters()
 NativeLibrary.logDeviceInfo()
+Log.logDeviceInfo()
 
 createNotificationChannels()
 }
@@ -107,7 +107,7 @@ class EmulationActivity : AppCompatActivity(), SensorEventListener {
 
 val preferences = PreferenceManager.getDefaultSharedPreferences(YuzuApplication.appContext)
 if (!preferences.getBoolean(Settings.PREF_MEMORY_WARNING_SHOWN, false)) {
-if (MemoryUtil.isLessThan(MemoryUtil.REQUIRED_MEMORY, MemoryUtil.Gb)) {
+if (MemoryUtil.isLessThan(MemoryUtil.REQUIRED_MEMORY, MemoryUtil.totalMemory)) {
 Toast.makeText(
 this,
 getString(
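The hunk above makes the low-memory warning compare the required amount against the device's actual total memory rather than a fixed unit constant. A hedged sketch of such a check using the standard ActivityManager API; the constant and function names are illustrative, not taken from the diff:

```kotlin
import android.app.ActivityManager
import android.content.Context

// Hypothetical minimum, in GiB, for illustration only.
const val REQUIRED_GIB = 8

fun hasEnoughRam(context: Context): Boolean {
    val memInfo = ActivityManager.MemoryInfo()
    val am = context.getSystemService(Context.ACTIVITY_SERVICE) as ActivityManager
    am.getMemoryInfo(memInfo) // fills totalMem with the device RAM in bytes
    val totalGib = memInfo.totalMem / (1024.0 * 1024.0 * 1024.0)
    return totalGib >= REQUIRED_GIB
}
```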
@@ -10,7 +10,6 @@ import android.content.DialogInterface
 import android.content.SharedPreferences
 import android.content.pm.ActivityInfo
 import android.content.res.Configuration
-import android.graphics.Color
 import android.net.Uri
 import android.os.Bundle
 import android.os.Handler
@@ -155,7 +154,6 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
 }
 
 binding.surfaceEmulation.holder.addCallback(this)
-binding.showFpsText.setTextColor(Color.YELLOW)
 binding.doneControlConfig.setOnClickListener { stopConfiguringControls() }
 
 binding.drawerLayout.addDrawerListener(object : DrawerListener {
@@ -312,6 +310,8 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
 ViewUtils.showView(binding.surfaceInputOverlay)
 ViewUtils.hideView(binding.loadingIndicator)
 
+emulationState.updateSurface()
+
 // Setup overlay
 updateShowFpsOverlay()
 }
@@ -412,12 +412,12 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
 val FRAMETIME = 2
 val SPEED = 3
 perfStatsUpdater = {
-if (emulationViewModel.emulationStarted.value == true) {
+if (emulationViewModel.emulationStarted.value) {
 val perfStats = NativeLibrary.getPerfStats()
-if (perfStats[FPS] > 0 && _binding != null) {
+if (_binding != null) {
 binding.showFpsText.text = String.format("FPS: %.1f", perfStats[FPS])
 }
-perfStatsUpdateHandler.postDelayed(perfStatsUpdater!!, 100)
+perfStatsUpdateHandler.postDelayed(perfStatsUpdater!!, 800)
 }
 }
 perfStatsUpdateHandler.post(perfStatsUpdater!!)
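The second hunk above lowers the overlay refresh from every 100 ms to every 800 ms and drops the `perfStats[FPS] > 0` guard. The underlying mechanism is a self-rescheduling Runnable on a Handler; a standalone hedged sketch with illustrative names, not the fragment's actual fields:

```kotlin
import android.os.Handler
import android.os.Looper

const val UPDATE_INTERVAL_MS = 800L // matches the new interval in the hunk above

// Schedules updateOverlay() periodically on the main thread.
fun startOverlayUpdates(
    handler: Handler = Handler(Looper.getMainLooper()),
    updateOverlay: () -> Unit
) {
    val tick = object : Runnable {
        override fun run() {
            updateOverlay()                               // e.g. set "FPS: %.1f" on a TextView
            handler.postDelayed(this, UPDATE_INTERVAL_MS) // reschedule itself
        }
    }
    handler.post(tick)
}
```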
@@ -462,7 +462,6 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
 if (it.orientation == FoldingFeature.Orientation.HORIZONTAL) {
 // Restrict emulation and overlays to the top of the screen
 binding.emulationContainer.layoutParams.height = it.bounds.top
-binding.overlayContainer.layoutParams.height = it.bounds.top
 // Restrict input and menu drawer to the bottom of the screen
 binding.inputContainer.layoutParams.height = it.bounds.bottom
 binding.inGameMenu.layoutParams.height = it.bounds.bottom
@@ -476,7 +475,6 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
 if (!isFolding) {
 binding.emulationContainer.layoutParams.height = ViewGroup.LayoutParams.MATCH_PARENT
 binding.inputContainer.layoutParams.height = ViewGroup.LayoutParams.MATCH_PARENT
-binding.overlayContainer.layoutParams.height = ViewGroup.LayoutParams.MATCH_PARENT
 binding.inGameMenu.layoutParams.height = ViewGroup.LayoutParams.MATCH_PARENT
 isInFoldableLayout = false
 updateOrientation()
@@ -484,7 +482,6 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
 }
 binding.emulationContainer.requestLayout()
 binding.inputContainer.requestLayout()
-binding.overlayContainer.requestLayout()
 binding.inGameMenu.requestLayout()
 }
 
@@ -710,24 +707,6 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
 }
 
 v.setPadding(left, cutInsets.top, right, 0)
-
-// Ensure FPS text doesn't get cut off by rounded display corners
-val sidePadding = resources.getDimensionPixelSize(R.dimen.spacing_xtralarge)
-if (cutInsets.left == 0) {
-binding.showFpsText.setPadding(
-sidePadding,
-cutInsets.top,
-cutInsets.right,
-cutInsets.bottom
-)
-} else {
-binding.showFpsText.setPadding(
-cutInsets.left,
-cutInsets.top,
-cutInsets.right,
-cutInsets.bottom
-)
-}
 windowInsets
 }
 }
@@ -804,6 +783,13 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
 }
 }
 
+@Synchronized
+fun updateSurface() {
+if (surface != null) {
+NativeLibrary.surfaceChanged(surface)
+}
+}
+
 @Synchronized
 fun clearSurface() {
 if (surface == null) {
@@ -403,7 +403,7 @@ class MainActivity : AppCompatActivity(), ThemeProvider {
 } else {
 firmwarePath.deleteRecursively()
 cacheFirmwareDir.copyRecursively(firmwarePath, true)
-NativeLibrary.initializeSystem()
+NativeLibrary.initializeSystem(true)
 getString(R.string.save_file_imported_success)
 }
 } catch (e: Exception) {
@@ -649,7 +649,7 @@ class MainActivity : AppCompatActivity(), ThemeProvider {
 }
 
 // Reinitialize relevant data
-NativeLibrary.initializeSystem()
+NativeLibrary.initializeSystem(true)
 gamesViewModel.reloadGames(false)
 
 return@newInstance getString(R.string.user_data_import_success)
@@ -15,7 +15,7 @@ object DirectoryInitialization {
 fun start() {
 if (!areDirectoriesReady) {
 initializeInternalStorage()
-NativeLibrary.initializeSystem()
+NativeLibrary.initializeSystem(false)
 areDirectoriesReady = true
 }
 }
@@ -3,6 +3,8 @@
 
 package org.yuzu.yuzu_emu.utils
 
+import android.os.Build
+
 object Log {
 // Tracks whether we should share the old log or the current log
 var gameLaunched = false
@@ -16,4 +18,14 @@ object Log {
 external fun error(message: String)
 
 external fun critical(message: String)
+
+fun logDeviceInfo() {
+info("Device Manufacturer - ${Build.MANUFACTURER}")
+info("Device Model - ${Build.MODEL}")
+if (Build.VERSION.SDK_INT > Build.VERSION_CODES.R) {
+info("SoC Manufacturer - ${Build.SOC_MANUFACTURER}")
+info("SoC Model - ${Build.SOC_MODEL}")
+}
+info("Total System Memory - ${MemoryUtil.getDeviceRAM()}")
+}
 }
@@ -27,7 +27,7 @@ object MemoryUtil {
 const val Pb = Tb * 1024
 const val Eb = Pb * 1024
 
-private fun bytesToSizeUnit(size: Float): String =
+private fun bytesToSizeUnit(size: Float, roundUp: Boolean = false): String =
 when {
 size < Kb -> {
 context.getString(
@@ -39,63 +39,59 @@ object MemoryUtil {
 size < Mb -> {
 context.getString(
 R.string.memory_formatted,
-(size / Kb).hundredths,
+if (roundUp) ceil(size / Kb) else (size / Kb).hundredths,
 context.getString(R.string.memory_kilobyte)
 )
 }
 size < Gb -> {
 context.getString(
 R.string.memory_formatted,
-(size / Mb).hundredths,
+if (roundUp) ceil(size / Mb) else (size / Mb).hundredths,
 context.getString(R.string.memory_megabyte)
 )
 }
 size < Tb -> {
 context.getString(
 R.string.memory_formatted,
-(size / Gb).hundredths,
+if (roundUp) ceil(size / Gb) else (size / Gb).hundredths,
 context.getString(R.string.memory_gigabyte)
 )
 }
 size < Pb -> {
 context.getString(
 R.string.memory_formatted,
-(size / Tb).hundredths,
+if (roundUp) ceil(size / Tb) else (size / Tb).hundredths,
 context.getString(R.string.memory_terabyte)
 )
 }
 size < Eb -> {
 context.getString(
 R.string.memory_formatted,
-(size / Pb).hundredths,
+if (roundUp) ceil(size / Pb) else (size / Pb).hundredths,
 context.getString(R.string.memory_petabyte)
 )
 }
 else -> {
 context.getString(
 R.string.memory_formatted,
-(size / Eb).hundredths,
+if (roundUp) ceil(size / Eb) else (size / Eb).hundredths,
 context.getString(R.string.memory_exabyte)
 )
 }
 }
 
-// Devices are unlikely to have 0.5GB increments of memory so we'll just round up to account for
-// the potential error created by memInfo.totalMem
-private val totalMemory: Float
+val totalMemory: Float
 get() {
 val memInfo = ActivityManager.MemoryInfo()
 with(context.getSystemService(Context.ACTIVITY_SERVICE) as ActivityManager) {
 getMemoryInfo(memInfo)
 }
 
-return ceil(
-if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.UPSIDE_DOWN_CAKE) {
+return if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.UPSIDE_DOWN_CAKE) {
 memInfo.advertisedMem.toFloat()
 } else {
 memInfo.totalMem.toFloat()
 }
-)
 }
 
 fun isLessThan(minimum: Int, size: Float): Boolean =
@@ -109,5 +105,7 @@ object MemoryUtil {
 else -> totalMemory < Kb && totalMemory < minimum
 }
 
-fun getDeviceRAM(): String = bytesToSizeUnit(totalMemory)
+// Devices are unlikely to have 0.5GB increments of memory so we'll just round up to account for
+// the potential error created by memInfo.totalMem
+fun getDeviceRAM(): String = bytesToSizeUnit(totalMemory, true)
 }
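The MemoryUtil hunks above thread a roundUp flag through bytesToSizeUnit so the RAM figure shown to the user is rounded up to a whole unit (the OS typically reports slightly less than the advertised capacity). A small self-contained Kotlin sketch of that rounding, without the app's Context or string resources; names and values are examples only:

```kotlin
import kotlin.math.ceil

// Illustrative helper mirroring the idea of bytesToSizeUnit(..., roundUp = true).
const val GIB = 1024f * 1024f * 1024f

fun deviceRamLabel(totalMemBytes: Long): String {
    val gib = totalMemBytes / GIB
    return "${ceil(gib).toInt()} GB"
}

fun main() {
    // An 8 GB device whose OS reports ~7.45 GiB usable still shows as "8 GB".
    println(deviceRamLabel(8_000_000_000))
}
```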
@@ -199,8 +199,8 @@ bool EmulationSession::IsPaused() const {
 return m_is_running && m_is_paused;
 }
 
-const Core::PerfStatsResults& EmulationSession::PerfStats() const {
-std::scoped_lock m_perf_stats_lock(m_perf_stats_mutex);
+const Core::PerfStatsResults& EmulationSession::PerfStats() {
+m_perf_stats = m_system.GetAndResetPerfStats();
 return m_perf_stats;
 }
 
@@ -247,11 +247,13 @@ void EmulationSession::ConfigureFilesystemProvider(const std::string& filepath)
 }
 }
 
-void EmulationSession::InitializeSystem() {
+void EmulationSession::InitializeSystem(bool reload) {
+if (!reload) {
 // Initialize logging system
 Common::Log::Initialize();
 Common::Log::SetColorConsoleBackendEnabled(true);
 Common::Log::Start();
+}
 
 // Initialize filesystem.
 m_system.SetFilesystem(m_vfs);
@@ -381,11 +383,6 @@ void EmulationSession::RunEmulation() {
 break;
 }
 }
-{
-// Refresh performance stats.
-std::scoped_lock m_perf_stats_lock(m_perf_stats_mutex);
-m_perf_stats = m_system.GetAndResetPerfStats();
-}
 }
 }
 
@@ -667,12 +664,15 @@ void Java_org_yuzu_yuzu_1emu_NativeLibrary_onTouchReleased(JNIEnv* env, jclass c
 }
 }
 
-void Java_org_yuzu_yuzu_1emu_NativeLibrary_initializeSystem(JNIEnv* env, jclass clazz) {
+void Java_org_yuzu_yuzu_1emu_NativeLibrary_initializeSystem(JNIEnv* env, jclass clazz,
+jboolean reload) {
 // Create the default config.ini.
 Config{};
 // Initialize the emulated system.
+if (!reload) {
 EmulationSession::GetInstance().System().Initialize();
-EmulationSession::GetInstance().InitializeSystem();
+}
+EmulationSession::GetInstance().InitializeSystem(reload);
 }
 
 jint Java_org_yuzu_yuzu_1emu_NativeLibrary_defaultCPUCore(JNIEnv* env, jclass clazz) {
@@ -41,9 +41,9 @@ public:
 void RunEmulation();
 void ShutdownEmulation();
 
-const Core::PerfStatsResults& PerfStats() const;
+const Core::PerfStatsResults& PerfStats();
 void ConfigureFilesystemProvider(const std::string& filepath);
-void InitializeSystem();
+void InitializeSystem(bool reload);
 Core::SystemResultStatus InitializeEmulation(const std::string& filepath);
 
 bool IsHandheldOnly();
@@ -80,6 +80,5 @@ private:
 
 // Synchronization
 std::condition_variable_any m_cv;
-mutable std::mutex m_perf_stats_mutex;
 mutable std::mutex m_mutex;
 };
@@ -134,16 +134,18 @@
 <FrameLayout
 android:id="@+id/overlay_container"
 android:layout_width="match_parent"
-android:layout_height="match_parent">
+android:layout_height="match_parent"
+android:fitsSystemWindows="true">
 
-<TextView
+<com.google.android.material.textview.MaterialTextView
 android:id="@+id/show_fps_text"
+style="@style/TextAppearance.Material3.BodyMedium"
 android:layout_width="wrap_content"
 android:layout_height="wrap_content"
 android:layout_gravity="left"
 android:clickable="false"
 android:focusable="false"
-android:shadowColor="@android:color/black"
+android:paddingHorizontal="20dp"
 android:textColor="@android:color/white"
 android:textSize="12sp"
 tools:ignore="RtlHardcoded" />
@@ -30,9 +30,9 @@ bool IsValidMultiStreamChannelCount(u32 channel_count) {
 return channel_count <= OpusStreamCountMax;
 }
 
-bool IsValidMultiStreamStreamCounts(s32 total_stream_count, s32 sterero_stream_count) {
+bool IsValidMultiStreamStreamCounts(s32 total_stream_count, s32 stereo_stream_count) {
 return IsValidMultiStreamChannelCount(total_stream_count) && total_stream_count > 0 &&
-sterero_stream_count > 0 && sterero_stream_count <= total_stream_count;
+stereo_stream_count >= 0 && stereo_stream_count <= total_stream_count;
 }
 } // namespace
 
@@ -24,7 +24,7 @@ bool IsValidSampleRate(u32 sample_rate) {
 }
 
 bool IsValidStreamCount(u32 channel_count, u32 total_stream_count, u32 stereo_stream_count) {
-return total_stream_count > 0 && stereo_stream_count > 0 &&
+return total_stream_count > 0 && static_cast<s32>(stereo_stream_count) >= 0 &&
 stereo_stream_count <= total_stream_count &&
 total_stream_count + stereo_stream_count <= channel_count;
 }
@@ -1,6 +1,9 @@
 // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
 
+#ifdef ANDROID
+#include <sys/system_properties.h>
+#endif
 #include "common/arm64/native_clock.h"
 
 namespace Common::Arm64 {
@@ -65,7 +68,23 @@ bool NativeClock::IsNative() const {
 
 u64 NativeClock::GetHostCNTFRQ() {
 u64 cntfrq_el0 = 0;
+std::string_view board{""};
+#ifdef ANDROID
+char buffer[PROP_VALUE_MAX];
+int len{__system_property_get("ro.product.board", buffer)};
+board = std::string_view(buffer, static_cast<size_t>(len));
+#endif
+if (board == "s5e9925") { // Exynos 2200
+cntfrq_el0 = 25600000;
+} else if (board == "exynos2100") { // Exynos 2100
+cntfrq_el0 = 26000000;
+} else if (board == "exynos9810") { // Exynos 9810
+cntfrq_el0 = 26000000;
+} else if (board == "s5e8825") { // Exynos 1280
+cntfrq_el0 = 26000000;
+} else {
 asm("mrs %[cntfrq_el0], cntfrq_el0" : [cntfrq_el0] "=r"(cntfrq_el0));
+}
 return cntfrq_el0;
 }
 
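The second hunk above hard-codes the timer frequency for a few Exynos boards by reading the `ro.product.board` system property in native code. On the Kotlin side the same identifier is exposed as `android.os.Build.BOARD`; a hedged sketch of an equivalent lookup, with board values and frequencies copied from the hunk and an illustrative function name:

```kotlin
import android.os.Build

// Returns a known-good counter frequency for boards whose cntfrq_el0 register
// is unreliable, or null when the hardware register should be trusted.
fun knownCntfrqForBoard(board: String = Build.BOARD): Long? = when (board) {
    "s5e9925" -> 25_600_000L    // Exynos 2200
    "exynos2100" -> 26_000_000L // Exynos 2100
    "exynos9810" -> 26_000_000L // Exynos 9810
    "s5e8825" -> 26_000_000L    // Exynos 1280
    else -> null
}
```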
@@ -9,12 +9,12 @@ PageTable::PageTable() = default;
 
 PageTable::~PageTable() noexcept = default;
 
-bool PageTable::BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_context,
-u64 address) const {
+bool PageTable::BeginTraversal(TraversalEntry* out_entry, TraversalContext* out_context,
+Common::ProcessAddress address) const {
 // Setup invalid defaults.
-out_entry.phys_addr = 0;
-out_entry.block_size = page_size;
-out_context.next_page = 0;
+out_entry->phys_addr = 0;
+out_entry->block_size = page_size;
+out_context->next_page = 0;
 
 // Validate that we can read the actual entry.
 const auto page = address / page_size;
@@ -29,20 +29,20 @@ bool PageTable::BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_
 }
 
 // Populate the results.
-out_entry.phys_addr = phys_addr + address;
-out_context.next_page = page + 1;
-out_context.next_offset = address + page_size;
+out_entry->phys_addr = phys_addr + GetInteger(address);
+out_context->next_page = page + 1;
+out_context->next_offset = GetInteger(address) + page_size;
 
 return true;
 }
 
-bool PageTable::ContinueTraversal(TraversalEntry& out_entry, TraversalContext& context) const {
+bool PageTable::ContinueTraversal(TraversalEntry* out_entry, TraversalContext* context) const {
 // Setup invalid defaults.
-out_entry.phys_addr = 0;
-out_entry.block_size = page_size;
+out_entry->phys_addr = 0;
+out_entry->block_size = page_size;
 
 // Validate that we can read the actual entry.
-const auto page = context.next_page;
+const auto page = context->next_page;
 if (page >= backing_addr.size()) {
 return false;
 }
@@ -54,9 +54,9 @@ bool PageTable::ContinueTraversal(TraversalEntry& out_entry, TraversalContext& c
 }
 
 // Populate the results.
-out_entry.phys_addr = phys_addr + context.next_offset;
-context.next_page = page + 1;
-context.next_offset += page_size;
+out_entry->phys_addr = phys_addr + context->next_offset;
+context->next_page = page + 1;
+context->next_offset += page_size;
 
 return true;
 }
@@ -6,6 +6,7 @@
 #include <atomic>
 
 #include "common/common_types.h"
+#include "common/typed_address.h"
 #include "common/virtual_buffer.h"
 
 namespace Common {
@@ -100,9 +101,9 @@ struct PageTable {
 PageTable(PageTable&&) noexcept = default;
 PageTable& operator=(PageTable&&) noexcept = default;
 
-bool BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_context,
-u64 address) const;
-bool ContinueTraversal(TraversalEntry& out_entry, TraversalContext& context) const;
+bool BeginTraversal(TraversalEntry* out_entry, TraversalContext* out_context,
+Common::ProcessAddress address) const;
+bool ContinueTraversal(TraversalEntry* out_entry, TraversalContext* context) const;
 
 /**
 * Resizes the page table to be able to accommodate enough pages within
@@ -117,6 +118,16 @@ struct PageTable {
 return current_address_space_width_in_bits;
 }
 
+bool GetPhysicalAddress(Common::PhysicalAddress* out_phys_addr,
+Common::ProcessAddress virt_addr) const {
+if (virt_addr > (1ULL << this->GetAddressSpaceBits())) {
+return false;
+}
+
+*out_phys_addr = backing_addr[virt_addr / page_size] + GetInteger(virt_addr);
+return true;
+}
+
 /**
 * Vector of memory pointers backing each page. An entry can only be non-null if the
 * corresponding attribute element is of type `Memory`.
@@ -271,8 +271,9 @@ add_library(core STATIC
 hle/kernel/k_page_heap.h
 hle/kernel/k_page_group.cpp
 hle/kernel/k_page_group.h
-hle/kernel/k_page_table.cpp
 hle/kernel/k_page_table.h
+hle/kernel/k_page_table_base.cpp
+hle/kernel/k_page_table_base.h
 hle/kernel/k_page_table_manager.h
 hle/kernel/k_page_table_slab_heap.h
 hle/kernel/k_port.cpp
@@ -280,6 +281,7 @@ add_library(core STATIC
 hle/kernel/k_priority_queue.h
 hle/kernel/k_process.cpp
 hle/kernel/k_process.h
+hle/kernel/k_process_page_table.h
 hle/kernel/k_readable_event.cpp
 hle/kernel/k_readable_event.h
 hle/kernel/k_resource_limit.cpp
@@ -330,8 +332,6 @@ add_library(core STATIC
 hle/kernel/physical_core.cpp
 hle/kernel/physical_core.h
 hle/kernel/physical_memory.h
-hle/kernel/process_capability.cpp
-hle/kernel/process_capability.h
 hle/kernel/slab_helpers.h
 hle/kernel/svc.cpp
 hle/kernel/svc.h
@@ -727,29 +727,34 @@ static constexpr const char* GetMemoryPermissionString(const Kernel::Svc::Memory
 }
 }
 
-static VAddr GetModuleEnd(Kernel::KPageTable& page_table, VAddr base) {
-Kernel::Svc::MemoryInfo mem_info;
+static VAddr GetModuleEnd(Kernel::KProcessPageTable& page_table, VAddr base) {
+Kernel::KMemoryInfo mem_info;
+Kernel::Svc::MemoryInfo svc_mem_info;
+Kernel::Svc::PageInfo page_info;
 VAddr cur_addr{base};
 
 // Expect: r-x Code (.text)
-mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo();
-cur_addr = mem_info.base_address + mem_info.size;
-if (mem_info.state != Kernel::Svc::MemoryState::Code ||
-mem_info.permission != Kernel::Svc::MemoryPermission::ReadExecute) {
+R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr));
+svc_mem_info = mem_info.GetSvcMemoryInfo();
+cur_addr = svc_mem_info.base_address + svc_mem_info.size;
+if (svc_mem_info.state != Kernel::Svc::MemoryState::Code ||
+svc_mem_info.permission != Kernel::Svc::MemoryPermission::ReadExecute) {
 return cur_addr - 1;
 }
 
 // Expect: r-- Code (.rodata)
-mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo();
-cur_addr = mem_info.base_address + mem_info.size;
-if (mem_info.state != Kernel::Svc::MemoryState::Code ||
-mem_info.permission != Kernel::Svc::MemoryPermission::Read) {
+R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr));
+svc_mem_info = mem_info.GetSvcMemoryInfo();
+cur_addr = svc_mem_info.base_address + svc_mem_info.size;
+if (svc_mem_info.state != Kernel::Svc::MemoryState::Code ||
+svc_mem_info.permission != Kernel::Svc::MemoryPermission::Read) {
 return cur_addr - 1;
 }
 
 // Expect: rw- CodeData (.data)
-mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo();
-cur_addr = mem_info.base_address + mem_info.size;
+R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr));
+svc_mem_info = mem_info.GetSvcMemoryInfo();
+cur_addr = svc_mem_info.base_address + svc_mem_info.size;
 return cur_addr - 1;
 }
 
@@ -767,7 +772,7 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
 
 if (command_str == "get fastmem") {
 if (Settings::IsFastmemEnabled()) {
-const auto& impl = page_table.PageTableImpl();
+const auto& impl = page_table.GetImpl();
 const auto region = reinterpret_cast<uintptr_t>(impl.fastmem_arena);
 const auto region_bits = impl.current_address_space_width_in_bits;
 const auto region_size = 1ULL << region_bits;
@@ -785,20 +790,22 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
 reply = fmt::format("Process: {:#x} ({})\n"
 "Program Id: {:#018x}\n",
 process->GetProcessId(), process->GetName(), process->GetProgramId());
-reply += fmt::format("Layout:\n"
+reply += fmt::format(
+"Layout:\n"
 " Alias: {:#012x} - {:#012x}\n"
 " Heap: {:#012x} - {:#012x}\n"
 " Aslr: {:#012x} - {:#012x}\n"
 " Stack: {:#012x} - {:#012x}\n"
 "Modules:\n",
 GetInteger(page_table.GetAliasRegionStart()),
-GetInteger(page_table.GetAliasRegionEnd()),
+GetInteger(page_table.GetAliasRegionStart()) + page_table.GetAliasRegionSize() - 1,
 GetInteger(page_table.GetHeapRegionStart()),
-GetInteger(page_table.GetHeapRegionEnd()),
+GetInteger(page_table.GetHeapRegionStart()) + page_table.GetHeapRegionSize() - 1,
 GetInteger(page_table.GetAliasCodeRegionStart()),
-GetInteger(page_table.GetAliasCodeRegionEnd()),
+GetInteger(page_table.GetAliasCodeRegionStart()) + page_table.GetAliasCodeRegionSize() -
+1,
 GetInteger(page_table.GetStackRegionStart()),
-GetInteger(page_table.GetStackRegionEnd()));
+GetInteger(page_table.GetStackRegionStart()) + page_table.GetStackRegionSize() - 1);
 
 for (const auto& [vaddr, name] : modules) {
 reply += fmt::format(" {:#012x} - {:#012x} {}\n", vaddr,
@@ -811,27 +818,34 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
 while (true) {
 using MemoryAttribute = Kernel::Svc::MemoryAttribute;
 
-auto mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo();
+Kernel::KMemoryInfo mem_info{};
+Kernel::Svc::PageInfo page_info{};
+R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info),
+cur_addr));
+auto svc_mem_info = mem_info.GetSvcMemoryInfo();
 
-if (mem_info.state != Kernel::Svc::MemoryState::Inaccessible ||
-mem_info.base_address + mem_info.size - 1 != std::numeric_limits<u64>::max()) {
-const char* state = GetMemoryStateName(mem_info.state);
-const char* perm = GetMemoryPermissionString(mem_info);
+if (svc_mem_info.state != Kernel::Svc::MemoryState::Inaccessible ||
+svc_mem_info.base_address + svc_mem_info.size - 1 !=
+std::numeric_limits<u64>::max()) {
+const char* state = GetMemoryStateName(svc_mem_info.state);
+const char* perm = GetMemoryPermissionString(svc_mem_info);
 
-const char l = True(mem_info.attribute & MemoryAttribute::Locked) ? 'L' : '-';
-const char i = True(mem_info.attribute & MemoryAttribute::IpcLocked) ? 'I' : '-';
-const char d = True(mem_info.attribute & MemoryAttribute::DeviceShared) ? 'D' : '-';
-const char u = True(mem_info.attribute & MemoryAttribute::Uncached) ? 'U' : '-';
+const char l = True(svc_mem_info.attribute & MemoryAttribute::Locked) ? 'L' : '-';
+const char i =
+True(svc_mem_info.attribute & MemoryAttribute::IpcLocked) ? 'I' : '-';
+const char d =
+True(svc_mem_info.attribute & MemoryAttribute::DeviceShared) ? 'D' : '-';
+const char u = True(svc_mem_info.attribute & MemoryAttribute::Uncached) ? 'U' : '-';
 const char p =
-True(mem_info.attribute & MemoryAttribute::PermissionLocked) ? 'P' : '-';
+True(svc_mem_info.attribute & MemoryAttribute::PermissionLocked) ? 'P' : '-';
 
-reply += fmt::format(" {:#012x} - {:#012x} {} {} {}{}{}{}{} [{}, {}]\n",
-mem_info.base_address,
-mem_info.base_address + mem_info.size - 1, perm, state, l, i,
-d, u, p, mem_info.ipc_count, mem_info.device_count);
+reply += fmt::format(
+" {:#012x} - {:#012x} {} {} {}{}{}{}{} [{}, {}]\n", svc_mem_info.base_address,
+svc_mem_info.base_address + svc_mem_info.size - 1, perm, state, l, i, d, u, p,
+svc_mem_info.ipc_count, svc_mem_info.device_count);
 }
 
-const uintptr_t next_address = mem_info.base_address + mem_info.size;
+const uintptr_t next_address = svc_mem_info.base_address + svc_mem_info.size;
 if (next_address <= cur_addr) {
 break;
 }
@@ -35,13 +35,14 @@ struct RomFSHeader {
 static_assert(sizeof(RomFSHeader) == 0x50, "RomFSHeader has incorrect size.");
 
 struct DirectoryEntry {
+u32_le parent;
 u32_le sibling;
 u32_le child_dir;
 u32_le child_file;
 u32_le hash;
 u32_le name_length;
 };
-static_assert(sizeof(DirectoryEntry) == 0x14, "DirectoryEntry has incorrect size.");
+static_assert(sizeof(DirectoryEntry) == 0x18, "DirectoryEntry has incorrect size.");
 
 struct FileEntry {
 u32_le parent;
@@ -64,25 +65,22 @@ std::pair<Entry, std::string> GetEntry(const VirtualFile& file, std::size_t offs
 return {entry, string};
 }
 
-void ProcessFile(VirtualFile file, std::size_t file_offset, std::size_t data_offset,
-u32 this_file_offset, std::shared_ptr<VectorVfsDirectory> parent) {
-while (true) {
+void ProcessFile(const VirtualFile& file, std::size_t file_offset, std::size_t data_offset,
+u32 this_file_offset, std::shared_ptr<VectorVfsDirectory>& parent) {
+while (this_file_offset != ROMFS_ENTRY_EMPTY) {
 auto entry = GetEntry<FileEntry>(file, file_offset + this_file_offset);
 
 parent->AddFile(std::make_shared<OffsetVfsFile>(
 file, entry.first.size, entry.first.offset + data_offset, entry.second));
 
-if (entry.first.sibling == ROMFS_ENTRY_EMPTY)
-break;
-
 this_file_offset = entry.first.sibling;
 }
 }
 
-void ProcessDirectory(VirtualFile file, std::size_t dir_offset, std::size_t file_offset,
+void ProcessDirectory(const VirtualFile& file, std::size_t dir_offset, std::size_t file_offset,
 std::size_t data_offset, u32 this_dir_offset,
-std::shared_ptr<VectorVfsDirectory> parent) {
-while (true) {
+std::shared_ptr<VectorVfsDirectory>& parent) {
+while (this_dir_offset != ROMFS_ENTRY_EMPTY) {
 auto entry = GetEntry<DirectoryEntry>(file, dir_offset + this_dir_offset);
 auto current = std::make_shared<VectorVfsDirectory>(
 std::vector<VirtualFile>{}, std::vector<VirtualDir>{}, entry.second);
@@ -97,14 +95,12 @@ void ProcessDirectory(VirtualFile file, std::size_t dir_offset, std::size_t file
 }
 
 parent->AddDirectory(current);
-if (entry.first.sibling == ROMFS_ENTRY_EMPTY)
-break;
 this_dir_offset = entry.first.sibling;
 }
 }
 } // Anonymous namespace
 
-VirtualDir ExtractRomFS(VirtualFile file, RomFSExtractionType type) {
+VirtualDir ExtractRomFS(VirtualFile file) {
 RomFSHeader header{};
 if (file->ReadObject(&header) != sizeof(RomFSHeader))
 return nullptr;
@@ -113,27 +109,17 @@ VirtualDir ExtractRomFS(VirtualFile file, RomFSExtractionType type) {
 return nullptr;
 
 const u64 file_offset = header.file_meta.offset;
-const u64 dir_offset = header.directory_meta.offset + 4;
+const u64 dir_offset = header.directory_meta.offset;
 
-auto root =
-std::make_shared<VectorVfsDirectory>(std::vector<VirtualFile>{}, std::vector<VirtualDir>{},
-file->GetName(), file->GetContainingDirectory());
+auto root_container = std::make_shared<VectorVfsDirectory>();
 
-ProcessDirectory(file, dir_offset, file_offset, header.data_offset, 0, root);
+ProcessDirectory(file, dir_offset, file_offset, header.data_offset, 0, root_container);
 
-VirtualDir out = std::move(root);
-if (type == RomFSExtractionType::SingleDiscard)
-return out->GetSubdirectories().front();
-
-while (out->GetSubdirectories().size() == 1 && out->GetFiles().empty()) {
-if (Common::ToLower(out->GetSubdirectories().front()->GetName()) == "data" &&
-type == RomFSExtractionType::Truncated)
-break;
-out = out->GetSubdirectories().front();
+if (auto root = root_container->GetSubdirectory(""); root) {
+return std::make_shared<CachedVfsDirectory>(std::move(root));
 }
 
-return std::make_shared<CachedVfsDirectory>(std::move(out));
+return nullptr;
 }
 
 VirtualFile CreateRomFS(VirtualDir dir, VirtualDir ext) {
@@ -7,16 +7,9 @@
 
 namespace FileSys {
 
-enum class RomFSExtractionType {
-Full, // Includes data directory
-Truncated, // Traverses into data directory
-SingleDiscard, // Traverses into the first subdirectory of root
-};
-
 // Converts a RomFS binary blob to VFS Filesystem
 // Returns nullptr on failure
-VirtualDir ExtractRomFS(VirtualFile file,
-RomFSExtractionType type = RomFSExtractionType::Truncated);
+VirtualDir ExtractRomFS(VirtualFile file);
 
 // Converts a VFS filesystem into a RomFS binary
 // Returns nullptr on failure
@@ -96,18 +96,7 @@ void EmulatedController::ReloadFromSettings() {
 }
 
 controller.color_values = {};
-controller.colors_state.fullkey = {
-.body = GetNpadColor(player.body_color_left),
-.button = GetNpadColor(player.button_color_left),
-};
-controller.colors_state.left = {
-.body = GetNpadColor(player.body_color_left),
-.button = GetNpadColor(player.button_color_left),
-};
-controller.colors_state.right = {
-.body = GetNpadColor(player.body_color_right),
-.button = GetNpadColor(player.button_color_right),
-};
+ReloadColorsFromSettings();
 
 ring_params[0] = Common::ParamPackage(Settings::values.ringcon_analogs);
 
@@ -128,6 +117,30 @@ void EmulatedController::ReloadFromSettings() {
 ReloadInput();
 }
 
+void EmulatedController::ReloadColorsFromSettings() {
+const auto player_index = NpadIdTypeToIndex(npad_id_type);
+const auto& player = Settings::values.players.GetValue()[player_index];
+
+// Avoid updating colors if overridden by physical controller
+if (controller.color_values[LeftIndex].body != 0 &&
+controller.color_values[RightIndex].body != 0) {
+return;
+}
+
+controller.colors_state.fullkey = {
+.body = GetNpadColor(player.body_color_left),
+.button = GetNpadColor(player.button_color_left),
+};
+controller.colors_state.left = {
+.body = GetNpadColor(player.body_color_left),
+.button = GetNpadColor(player.button_color_left),
+};
+controller.colors_state.right = {
+.body = GetNpadColor(player.body_color_right),
+.button = GetNpadColor(player.button_color_right),
+};
+}
+
 void EmulatedController::LoadDevices() {
 // TODO(german77): Use more buttons to detect the correct device
 const auto left_joycon = button_params[Settings::NativeButton::DRight];
@@ -1091,30 +1104,30 @@ void EmulatedController::SetBattery(const Common::Input::CallbackStatus& callbac
 
 bool is_charging = false;
 bool is_powered = false;
-NpadBatteryLevel battery_level = 0;
+NpadBatteryLevel battery_level = NpadBatteryLevel::Empty;
 switch (controller.battery_values[index]) {
 case Common::Input::BatteryLevel::Charging:
 is_charging = true;
 is_powered = true;
-battery_level = 6;
+battery_level = NpadBatteryLevel::Full;
 break;
 case Common::Input::BatteryLevel::Medium:
-battery_level = 6;
+battery_level = NpadBatteryLevel::High;
 break;
 case Common::Input::BatteryLevel::Low:
-battery_level = 4;
+battery_level = NpadBatteryLevel::Low;
 break;
 case Common::Input::BatteryLevel::Critical:
-battery_level = 2;
+battery_level = NpadBatteryLevel::Critical;
 break;
 case Common::Input::BatteryLevel::Empty:
-battery_level = 0;
+battery_level = NpadBatteryLevel::Empty;
 break;
 case Common::Input::BatteryLevel::None:
 case Common::Input::BatteryLevel::Full:
 default:
 is_powered = true;
-battery_level = 8;
+battery_level = NpadBatteryLevel::Full;
 break;
 }
 
@@ -253,6 +253,9 @@ public:
 /// Overrides current mapped devices with the stored configuration and reloads all input devices
 void ReloadFromSettings();
 
+/// Updates current colors with the ones stored in the configuration
+void ReloadColorsFromSettings();
+
 /// Saves the current mapped configuration
 void SaveCurrentConfig();
 
@@ -302,6 +302,15 @@ enum class TouchScreenModeForNx : u8 {
 Heat2,
 };
 
+// This is nn::hid::system::NpadBatteryLevel
+enum class NpadBatteryLevel : u32 {
+Empty,
+Critical,
+Low,
+High,
+Full,
+};
+
 // This is nn::hid::NpadStyleTag
 struct NpadStyleTag {
 union {
@@ -385,16 +394,12 @@ struct NpadGcTriggerState {
 };
 static_assert(sizeof(NpadGcTriggerState) == 0x10, "NpadGcTriggerState is an invalid size");
 
-// This is nn::hid::system::NpadBatteryLevel
-using NpadBatteryLevel = u32;
-static_assert(sizeof(NpadBatteryLevel) == 0x4, "NpadBatteryLevel is an invalid size");
-
 // This is nn::hid::system::NpadPowerInfo
 struct NpadPowerInfo {
 bool is_powered{};
 bool is_charging{};
 INSERT_PADDING_BYTES(0x6);
-NpadBatteryLevel battery_level{8};
+NpadBatteryLevel battery_level{NpadBatteryLevel::Full};
 };
 static_assert(sizeof(NpadPowerInfo) == 0xC, "NpadPowerInfo is an invalid size");
 
@@ -222,7 +222,7 @@ Result KSystemControl::AllocateSecureMemory(KernelCore& kernel, KVirtualAddress*
 };
 
 // We succeeded.
-*out = KPageTable::GetHeapVirtualAddress(kernel.MemoryLayout(), paddr);
+*out = KPageTable::GetHeapVirtualAddress(kernel, paddr);
 R_SUCCEED();
 }
 
@@ -238,8 +238,17 @@ void KSystemControl::FreeSecureMemory(KernelCore& kernel, KVirtualAddress addres
 ASSERT(Common::IsAligned(size, alignment));
 
 // Close the secure region's pages.
-kernel.MemoryManager().Close(KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), address),
+kernel.MemoryManager().Close(KPageTable::GetHeapPhysicalAddress(kernel, address),
 size / PageSize);
 }
 
+// Insecure Memory.
+KResourceLimit* KSystemControl::GetInsecureMemoryResourceLimit(KernelCore& kernel) {
+return kernel.GetSystemResourceLimit();
+}
+
+u32 KSystemControl::GetInsecureMemoryPool() {
+return static_cast<u32>(KMemoryManager::Pool::SystemNonSecure);
+}
+
 } // namespace Kernel::Board::Nintendo::Nx
@@ -8,7 +8,8 @@
 
 namespace Kernel {
 class KernelCore;
-}
+class KResourceLimit;
+} // namespace Kernel
 
 namespace Kernel::Board::Nintendo::Nx {
 
@@ -40,6 +41,10 @@ public:
                                       u32 pool);
     static void FreeSecureMemory(KernelCore& kernel, KVirtualAddress address, size_t size,
                                  u32 pool);
+
+    // Insecure Memory.
+    static KResourceLimit* GetInsecureMemoryResourceLimit(KernelCore& kernel);
+    static u32 GetInsecureMemoryPool();
 };
 
 } // namespace Kernel::Board::Nintendo::Nx
@@ -4,14 +4,15 @@
 #include "core/hardware_properties.h"
 #include "core/hle/kernel/k_capabilities.h"
 #include "core/hle/kernel/k_memory_layout.h"
-#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_process_page_table.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/svc_results.h"
 #include "core/hle/kernel/svc_version.h"
 
 namespace Kernel {
 
-Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table) {
+Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps,
+                                       KProcessPageTable* page_table) {
     // We're initializing an initial process.
     m_svc_access_flags.reset();
     m_irq_access_flags.reset();
@@ -41,7 +42,8 @@ Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps, KPageTabl
     R_RETURN(this->SetCapabilities(kern_caps, page_table));
 }
 
-Result KCapabilities::InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table) {
+Result KCapabilities::InitializeForUser(std::span<const u32> user_caps,
+                                        KProcessPageTable* page_table) {
     // We're initializing a user process.
     m_svc_access_flags.reset();
     m_irq_access_flags.reset();
@@ -121,7 +123,7 @@ Result KCapabilities::SetSyscallMaskCapability(const u32 cap, u32& set_svc) {
     R_SUCCEED();
 }
 
-Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table) {
+Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KProcessPageTable* page_table) {
     const auto range_pack = MapRange{cap};
     const auto size_pack = MapRangeSize{size_cap};
 
@@ -142,16 +144,13 @@ Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KPageTable* p
                           ? KMemoryPermission::UserRead
                           : KMemoryPermission::UserReadWrite;
     if (MapRangeSize{size_cap}.normal) {
-        // R_RETURN(page_table->MapStatic(phys_addr, size, perm));
+        R_RETURN(page_table->MapStatic(phys_addr, size, perm));
     } else {
-        // R_RETURN(page_table->MapIo(phys_addr, size, perm));
+        R_RETURN(page_table->MapIo(phys_addr, size, perm));
     }
-
-    UNIMPLEMENTED();
-    R_SUCCEED();
 }
 
-Result KCapabilities::MapIoPage_(const u32 cap, KPageTable* page_table) {
+Result KCapabilities::MapIoPage_(const u32 cap, KProcessPageTable* page_table) {
     // Get/validate address/size
     const u64 phys_addr = MapIoPage{cap}.address.Value() * PageSize;
     const size_t num_pages = 1;
@@ -160,10 +159,7 @@ Result KCapabilities::MapIoPage_(const u32 cap, KPageTable* page_table) {
     R_UNLESS(((phys_addr + size - 1) & ~PhysicalMapAllowedMask) == 0, ResultInvalidAddress);
 
     // Do the mapping.
-    // R_RETURN(page_table->MapIo(phys_addr, size, KMemoryPermission_UserReadWrite));
-
-    UNIMPLEMENTED();
-    R_SUCCEED();
+    R_RETURN(page_table->MapIo(phys_addr, size, KMemoryPermission::UserReadWrite));
 }
 
 template <typename F>
@@ -200,13 +196,11 @@ Result KCapabilities::ProcessMapRegionCapability(const u32 cap, F f) {
     R_SUCCEED();
 }
 
-Result KCapabilities::MapRegion_(const u32 cap, KPageTable* page_table) {
+Result KCapabilities::MapRegion_(const u32 cap, KProcessPageTable* page_table) {
     // Map each region into the process's page table.
     return ProcessMapRegionCapability(
-        cap, [](KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
-            // R_RETURN(page_table->MapRegion(region_type, perm));
-            UNIMPLEMENTED();
-            R_SUCCEED();
+        cap, [page_table](KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
+            R_RETURN(page_table->MapRegion(region_type, perm));
         });
 }
 
@@ -280,7 +274,7 @@ Result KCapabilities::SetDebugFlagsCapability(const u32 cap) {
 }
 
 Result KCapabilities::SetCapability(const u32 cap, u32& set_flags, u32& set_svc,
-                                    KPageTable* page_table) {
+                                    KProcessPageTable* page_table) {
     // Validate this is a capability we can act on.
     const auto type = GetCapabilityType(cap);
     R_UNLESS(type != CapabilityType::Invalid, ResultInvalidArgument);
@@ -318,7 +312,7 @@ Result KCapabilities::SetCapability(const u32 cap, u32& set_flags, u32& set_svc,
     }
 }
 
-Result KCapabilities::SetCapabilities(std::span<const u32> caps, KPageTable* page_table) {
+Result KCapabilities::SetCapabilities(std::span<const u32> caps, KProcessPageTable* page_table) {
     u32 set_flags = 0, set_svc = 0;
 
     for (size_t i = 0; i < caps.size(); i++) {
@@ -15,15 +15,15 @@
 
 namespace Kernel {
 
-class KPageTable;
+class KProcessPageTable;
 class KernelCore;
 
 class KCapabilities {
 public:
     constexpr explicit KCapabilities() = default;
 
-    Result InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table);
-    Result InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table);
+    Result InitializeForKip(std::span<const u32> kern_caps, KProcessPageTable* page_table);
+    Result InitializeForUser(std::span<const u32> user_caps, KProcessPageTable* page_table);
 
     static Result CheckCapabilities(KernelCore& kernel, std::span<const u32> user_caps);
 
@@ -264,9 +264,9 @@ private:
 
     Result SetCorePriorityCapability(const u32 cap);
     Result SetSyscallMaskCapability(const u32 cap, u32& set_svc);
-    Result MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table);
-    Result MapIoPage_(const u32 cap, KPageTable* page_table);
-    Result MapRegion_(const u32 cap, KPageTable* page_table);
+    Result MapRange_(const u32 cap, const u32 size_cap, KProcessPageTable* page_table);
+    Result MapIoPage_(const u32 cap, KProcessPageTable* page_table);
+    Result MapRegion_(const u32 cap, KProcessPageTable* page_table);
     Result SetInterruptPairCapability(const u32 cap);
     Result SetProgramTypeCapability(const u32 cap);
     Result SetKernelVersionCapability(const u32 cap);
@@ -277,8 +277,9 @@ private:
     static Result ProcessMapRegionCapability(const u32 cap, F f);
     static Result CheckMapRegion(KernelCore& kernel, const u32 cap);
 
-    Result SetCapability(const u32 cap, u32& set_flags, u32& set_svc, KPageTable* page_table);
-    Result SetCapabilities(std::span<const u32> caps, KPageTable* page_table);
+    Result SetCapability(const u32 cap, u32& set_flags, u32& set_svc,
+                         KProcessPageTable* page_table);
+    Result SetCapabilities(std::span<const u32> caps, KProcessPageTable* page_table);
 
 private:
     Svc::SvcAccessFlagSet m_svc_access_flags{};
@@ -54,7 +54,7 @@ Result KDeviceAddressSpace::Detach(Svc::DeviceName device_name) {
     R_SUCCEED();
 }
 
-Result KDeviceAddressSpace::Map(KPageTable* page_table, KProcessAddress process_address,
+Result KDeviceAddressSpace::Map(KProcessPageTable* page_table, KProcessAddress process_address,
                                 size_t size, u64 device_address, u32 option, bool is_aligned) {
     // Check that the address falls within the space.
     R_UNLESS((m_space_address <= device_address &&
@@ -113,7 +113,7 @@ Result KDeviceAddressSpace::Map(KPageTable* page_table, KProcessAddress process_
     R_SUCCEED();
 }
 
-Result KDeviceAddressSpace::Unmap(KPageTable* page_table, KProcessAddress process_address,
+Result KDeviceAddressSpace::Unmap(KProcessPageTable* page_table, KProcessAddress process_address,
                                   size_t size, u64 device_address) {
     // Check that the address falls within the space.
     R_UNLESS((m_space_address <= device_address &&
@@ -5,7 +5,7 @@
 
 #include <string>
 
-#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_process_page_table.h"
 #include "core/hle/kernel/k_typed_address.h"
 #include "core/hle/kernel/slab_helpers.h"
 #include "core/hle/result.h"
@@ -31,23 +31,23 @@ public:
     Result Attach(Svc::DeviceName device_name);
     Result Detach(Svc::DeviceName device_name);
 
-    Result MapByForce(KPageTable* page_table, KProcessAddress process_address, size_t size,
+    Result MapByForce(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
                       u64 device_address, u32 option) {
         R_RETURN(this->Map(page_table, process_address, size, device_address, option, false));
     }
 
-    Result MapAligned(KPageTable* page_table, KProcessAddress process_address, size_t size,
+    Result MapAligned(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
                       u64 device_address, u32 option) {
         R_RETURN(this->Map(page_table, process_address, size, device_address, option, true));
     }
 
-    Result Unmap(KPageTable* page_table, KProcessAddress process_address, size_t size,
+    Result Unmap(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
                  u64 device_address);
 
     static void Initialize();
 
 private:
-    Result Map(KPageTable* page_table, KProcessAddress process_address, size_t size,
+    Result Map(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
                u64 device_address, u32 option, bool is_aligned);
 
 private:
@@ -394,6 +394,14 @@ private:
         return region.GetEndAddress();
     }
 
+public:
+    static const KMemoryRegion* Find(const KMemoryLayout& layout, KVirtualAddress address) {
+        return Find(address, layout.GetVirtualMemoryRegionTree());
+    }
+    static const KMemoryRegion* Find(const KMemoryLayout& layout, KPhysicalAddress address) {
+        return Find(address, layout.GetPhysicalMemoryRegionTree());
+    }
+
 private:
     u64 m_linear_phys_to_virt_diff{};
     u64 m_linear_virt_to_phys_diff{};
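The `Find` overloads added above dispatch on the address type to pick the matching region tree. The stand-alone sketch below uses placeholder types, not yuzu's, purely to illustrate the overload-on-strong-address-type pattern:

```cpp
#include <cstdint>
#include <iostream>

// Placeholder strong address types; the real KVirtualAddress/KPhysicalAddress differ.
struct VirtualAddress { std::uint64_t value; };
struct PhysicalAddress { std::uint64_t value; };

// Each overload would consult a different memory-region tree in the real helper.
const char* Find(VirtualAddress) { return "virtual-address region tree"; }
const char* Find(PhysicalAddress) { return "physical-address region tree"; }

int main() {
    std::cout << Find(VirtualAddress{0x80000000}) << '\n';  // resolves via the virtual tree
    std::cout << Find(PhysicalAddress{0x80000000}) << '\n'; // resolves via the physical tree
}
```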
@@ -456,8 +456,7 @@ size_t KMemoryManager::Impl::Initialize(KPhysicalAddress address, size_t size,
 }
 
 void KMemoryManager::Impl::InitializeOptimizedMemory(KernelCore& kernel) {
-    auto optimize_pa =
-        KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
+    auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
     auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
 
     std::memset(optimize_map, 0, CalculateOptimizedProcessOverheadSize(m_heap.GetSize()));
@@ -465,8 +464,7 @@ void KMemoryManager::Impl::InitializeOptimizedMemory(KernelCore& kernel) {
 
 void KMemoryManager::Impl::TrackUnoptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
                                                       size_t num_pages) {
-    auto optimize_pa =
-        KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
+    auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
     auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
 
     // Get the range we're tracking.
@@ -485,8 +483,7 @@ void KMemoryManager::Impl::TrackUnoptimizedAllocation(KernelCore& kernel, KPhysi
 
 void KMemoryManager::Impl::TrackOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
                                                     size_t num_pages) {
-    auto optimize_pa =
-        KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
+    auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
     auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
 
     // Get the range we're tracking.
@@ -506,8 +503,7 @@ void KMemoryManager::Impl::TrackOptimizedAllocation(KernelCore& kernel, KPhysica
 bool KMemoryManager::Impl::ProcessOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
                                                       size_t num_pages, u8 fill_pattern) {
     auto& device_memory = kernel.System().DeviceMemory();
-    auto optimize_pa =
-        KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
+    auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
     auto* optimize_map = device_memory.GetPointer<u64>(optimize_pa);
 
     // We want to return whether any pages were newly allocated.
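These hunks all collapse `KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), ...)` into `KPageTable::GetHeapPhysicalAddress(kernel, ...)`. The sketch below uses stand-in types (not yuzu's) to show the call-site simplification when a helper accepts the kernel object and fetches the layout itself:

```cpp
#include <cstdint>

// Stand-in types; the real KernelCore/KMemoryLayout interfaces are assumptions here.
struct LayoutStub {
    std::uint64_t linear_diff = 0x1000;
};
struct KernelStub {
    LayoutStub layout;
    const LayoutStub& MemoryLayout() const { return layout; }
};

// Before: every caller passed the layout explicitly.
std::uint64_t GetHeapPhysicalAddressOld(const LayoutStub& layout, std::uint64_t vaddr) {
    return vaddr - layout.linear_diff;
}

// After: the helper takes the kernel object and performs the lookup once.
std::uint64_t GetHeapPhysicalAddressNew(const KernelStub& kernel, std::uint64_t vaddr) {
    return GetHeapPhysicalAddressOld(kernel.MemoryLayout(), vaddr);
}

int main() {
    KernelStub kernel;
    // Both forms agree; the new one keeps call sites on a single line.
    return GetHeapPhysicalAddressNew(kernel, 0x2000) == 0x1000 ? 0 : 1;
}
```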
File diff suppressed because it is too large
@@ -3,548 +3,14 @@
 #pragma once
 
-#include <memory>
+#include "core/hle/kernel/k_page_table_base.h"
 
-#include "common/common_funcs.h"
-#include "common/page_table.h"
-#include "core/file_sys/program_metadata.h"
-#include "core/hle/kernel/k_dynamic_resource_manager.h"
-#include "core/hle/kernel/k_light_lock.h"
-#include "core/hle/kernel/k_memory_block.h"
-#include "core/hle/kernel/k_memory_block_manager.h"
-#include "core/hle/kernel/k_memory_layout.h"
-#include "core/hle/kernel/k_memory_manager.h"
-#include "core/hle/kernel/k_typed_address.h"
-#include "core/hle/result.h"
-#include "core/memory.h"
-
-namespace Core {
-class System;
-}
-
 namespace Kernel {
 
-enum class DisableMergeAttribute : u8 {
-    None = (0U << 0),
-    DisableHead = (1U << 0),
-    DisableHeadAndBody = (1U << 1),
-    EnableHeadAndBody = (1U << 2),
-    DisableTail = (1U << 3),
-    EnableTail = (1U << 4),
-    EnableAndMergeHeadBodyTail = (1U << 5),
-    EnableHeadBodyTail = EnableHeadAndBody | EnableTail,
-    DisableHeadBodyTail = DisableHeadAndBody | DisableTail,
-};
-
-struct KPageProperties {
-    KMemoryPermission perm;
-    bool io;
-    bool uncached;
-    DisableMergeAttribute disable_merge_attributes;
-};
-static_assert(std::is_trivial_v<KPageProperties>);
-static_assert(sizeof(KPageProperties) == sizeof(u32));
-
-class KBlockInfoManager;
-class KMemoryBlockManager;
-class KResourceLimit;
-class KSystemResource;
-
-class KPageTable final {
-protected:
-    struct PageLinkedList;
-
+class KPageTable final : public KPageTableBase {
 public:
-    enum class ICacheInvalidationStrategy : u32 { InvalidateRange, InvalidateAll };
-
-    YUZU_NON_COPYABLE(KPageTable);
-    YUZU_NON_MOVEABLE(KPageTable);
-
-    explicit KPageTable(Core::System& system_);
-    ~KPageTable();
-
-    Result InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
-                                bool enable_das_merge, bool from_back, KMemoryManager::Pool pool,
-                                KProcessAddress code_addr, size_t code_size,
-                                KSystemResource* system_resource, KResourceLimit* resource_limit,
-                                Core::Memory::Memory& memory);
-
-    void Finalize();
-
-    Result MapProcessCode(KProcessAddress addr, size_t pages_count, KMemoryState state,
-                          KMemoryPermission perm);
-    Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
-    Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
-                           ICacheInvalidationStrategy icache_invalidation_strategy);
-    Result UnmapProcessMemory(KProcessAddress dst_addr, size_t size, KPageTable& src_page_table,
-                              KProcessAddress src_addr);
-    Result MapPhysicalMemory(KProcessAddress addr, size_t size);
-    Result UnmapPhysicalMemory(KProcessAddress addr, size_t size);
-    Result MapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size);
-    Result UnmapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size);
-    Result SetProcessMemoryPermission(KProcessAddress addr, size_t size,
-                                      Svc::MemoryPermission svc_perm);
-    KMemoryInfo QueryInfo(KProcessAddress addr);
-    Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm);
-    Result SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr);
-    Result SetMaxHeapSize(size_t size);
-    Result SetHeapSize(u64* out, size_t size);
-    Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size,
-                                        KMemoryPermission perm, bool is_aligned, bool check_heap);
-    Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap);
-
-    Result UnlockForDeviceAddressSpace(KProcessAddress addr, size_t size);
-
-    Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size);
-    Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size);
-
-    Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
-                       KPageTable& src_page_table, KMemoryPermission test_perm,
-                       KMemoryState dst_state, bool send);
-    Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state);
-    Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state);
-
-    Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
-                                 KMemoryPermission perm);
-    Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg);
-    Result LockForCodeMemory(KPageGroup* out, KProcessAddress addr, size_t size);
-    Result UnlockForCodeMemory(KProcessAddress addr, size_t size, const KPageGroup& pg);
-    Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
-                                KMemoryState state_mask, KMemoryState state,
-                                KMemoryPermission perm_mask, KMemoryPermission perm,
-                                KMemoryAttribute attr_mask, KMemoryAttribute attr);
-
-    Common::PageTable& PageTableImpl() {
-        return *m_page_table_impl;
-    }
-
-    const Common::PageTable& PageTableImpl() const {
-        return *m_page_table_impl;
-    }
-
-    KBlockInfoManager* GetBlockInfoManager() {
-        return m_block_info_manager;
-    }
-
-    Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
-                    KPhysicalAddress phys_addr, KProcessAddress region_start,
-                    size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
-        R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start,
-                                region_num_pages, state, perm));
-    }
-
-    Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
-                    KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
-        R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
-                                this->GetRegionAddress(state),
-                                this->GetRegionSize(state) / PageSize, state, perm));
-    }
-
-    Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state,
-                    KMemoryPermission perm) {
-        R_RETURN(this->MapPages(out_addr, num_pages, PageSize, 0, false,
-                                this->GetRegionAddress(state),
-                                this->GetRegionSize(state) / PageSize, state, perm));
-    }
-
-    Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
-                    KMemoryPermission perm);
-    Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state);
-
-    Result MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
-                        KProcessAddress region_start, size_t region_num_pages, KMemoryState state,
-                        KMemoryPermission perm);
-    Result MapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state,
-                        KMemoryPermission perm);
-    Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state);
-    void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
-                        const KPageGroup& pg);
-
-    KProcessAddress GetRegionAddress(Svc::MemoryState state) const;
-    size_t GetRegionSize(Svc::MemoryState state) const;
-    bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const;
-
-    KProcessAddress GetRegionAddress(KMemoryState state) const {
-        return this->GetRegionAddress(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
-    }
-    size_t GetRegionSize(KMemoryState state) const {
-        return this->GetRegionSize(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
-    }
-    bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
-        return this->CanContain(addr, size,
-                                static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
-    }
-
-protected:
-    struct PageLinkedList {
-    private:
-        struct Node {
-            Node* m_next;
-            std::array<u8, PageSize - sizeof(Node*)> m_buffer;
-        };
-
-    public:
-        constexpr PageLinkedList() = default;
-
-        void Push(Node* n) {
-            ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize));
-            n->m_next = m_root;
-            m_root = n;
-        }
-
-        void Push(Core::Memory::Memory& memory, KVirtualAddress addr) {
-            this->Push(memory.GetPointer<Node>(GetInteger(addr)));
-        }
-
-        Node* Peek() const {
-            return m_root;
-        }
-
-        Node* Pop() {
-            Node* const r = m_root;
-
-            m_root = r->m_next;
-            r->m_next = nullptr;
-
-            return r;
-        }
-
-    private:
-        Node* m_root{};
-    };
-    static_assert(std::is_trivially_destructible<PageLinkedList>::value);
-
-private:
-    enum class OperationType : u32 {
-        Map = 0,
-        MapGroup = 1,
-        MapFirstGroup = 2,
-        Unmap = 3,
-        ChangePermissions = 4,
-        ChangePermissionsAndRefresh = 5,
-        ChangePermissionsAndRefreshAndFlush = 6,
-        Separate = 7,
-    };
-
-    static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
-        KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
-
-    Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
-                    KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start,
-                    size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
-    bool IsRegionContiguous(KProcessAddress addr, u64 size) const;
-    void AddRegionToPages(KProcessAddress start, size_t num_pages, KPageGroup& page_linked_list);
-    KMemoryInfo QueryInfoImpl(KProcessAddress addr);
-    KProcessAddress AllocateVirtualMemory(KProcessAddress start, size_t region_num_pages,
-                                          u64 needed_num_pages, size_t align);
-    Result Operate(KProcessAddress addr, size_t num_pages, const KPageGroup& page_group,
-                   OperationType operation);
-    Result Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm,
-                   OperationType operation, KPhysicalAddress map_addr = 0);
-    void FinalizeUpdate(PageLinkedList* page_list);
-
-    KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
-                                 size_t num_pages, size_t alignment, size_t offset,
-                                 size_t guard_pages);
-
-    Result CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
-                                      KMemoryState state_mask, KMemoryState state,
-                                      KMemoryPermission perm_mask, KMemoryPermission perm,
-                                      KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
-    Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, KMemoryState state_mask,
-                                      KMemoryState state, KMemoryPermission perm_mask,
-                                      KMemoryPermission perm, KMemoryAttribute attr_mask,
-                                      KMemoryAttribute attr) const {
-        R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
-                                                  perm, attr_mask, attr));
-    }
-
-    Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
-                            KMemoryPermission perm_mask, KMemoryPermission perm,
-                            KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
-    Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
-                            KMemoryAttribute* out_attr, size_t* out_blocks_needed,
-                            KMemoryBlockManager::const_iterator it, KProcessAddress last_addr,
-                            KMemoryState state_mask, KMemoryState state,
-                            KMemoryPermission perm_mask, KMemoryPermission perm,
-                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
-                            KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
-    Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
-                            KMemoryAttribute* out_attr, size_t* out_blocks_needed,
-                            KProcessAddress addr, size_t size, KMemoryState state_mask,
-                            KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
-                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
-                            KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
-    Result CheckMemoryState(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
-                            KMemoryState state_mask, KMemoryState state,
-                            KMemoryPermission perm_mask, KMemoryPermission perm,
-                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
-                            KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
-        R_RETURN(CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
-                                  state_mask, state, perm_mask, perm, attr_mask, attr,
-                                  ignore_attr));
-    }
-    Result CheckMemoryState(KProcessAddress addr, size_t size, KMemoryState state_mask,
-                            KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
-                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
-                            KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
-        R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
-                                        attr_mask, attr, ignore_attr));
-    }
-
-    Result LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_KPhysicalAddress,
-                             KProcessAddress addr, size_t size, KMemoryState state_mask,
-                             KMemoryState state, KMemoryPermission perm_mask,
-                             KMemoryPermission perm, KMemoryAttribute attr_mask,
-                             KMemoryAttribute attr, KMemoryPermission new_perm,
-                             KMemoryAttribute lock_attr);
-    Result UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
-                        KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
-                        KMemoryAttribute attr_mask, KMemoryAttribute attr,
-                        KMemoryPermission new_perm, KMemoryAttribute lock_attr,
-                        const KPageGroup* pg);
-
-    Result MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages);
-    bool IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages);
-
-    bool IsLockedByCurrentThread() const {
-        return m_general_lock.IsLockedByCurrentThread();
-    }
-
-    bool IsHeapPhysicalAddress(const KMemoryLayout& layout, KPhysicalAddress phys_addr) {
-        ASSERT(this->IsLockedByCurrentThread());
-
-        return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
-    }
-
-    bool GetPhysicalAddressLocked(KPhysicalAddress* out, KProcessAddress virt_addr) const {
-        ASSERT(this->IsLockedByCurrentThread());
-
-        *out = GetPhysicalAddr(virt_addr);
-
-        return *out != 0;
-    }
-
-    Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
-                             KProcessAddress address, size_t size, KMemoryPermission test_perm,
-                             KMemoryState dst_state);
-    Result SetupForIpcServer(KProcessAddress* out_addr, size_t size, KProcessAddress src_addr,
-                             KMemoryPermission test_perm, KMemoryState dst_state,
-                             KPageTable& src_page_table, bool send);
-    void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, KProcessAddress address,
-                                                 size_t size, KMemoryPermission prot_perm);
-
-    Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
-                                   size_t num_pages, KMemoryPermission perm);
-    Result MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
-                            const KPageGroup& pg, const KPageProperties properties, bool reuse_ll);
-
-    mutable KLightLock m_general_lock;
-    mutable KLightLock m_map_physical_memory_lock;
-
-public:
-    constexpr KProcessAddress GetAddressSpaceStart() const {
-        return m_address_space_start;
-    }
-    constexpr KProcessAddress GetAddressSpaceEnd() const {
-        return m_address_space_end;
-    }
-    constexpr size_t GetAddressSpaceSize() const {
-        return m_address_space_end - m_address_space_start;
-    }
-    constexpr KProcessAddress GetHeapRegionStart() const {
-        return m_heap_region_start;
-    }
-    constexpr KProcessAddress GetHeapRegionEnd() const {
-        return m_heap_region_end;
-    }
-    constexpr size_t GetHeapRegionSize() const {
-        return m_heap_region_end - m_heap_region_start;
-    }
-    constexpr KProcessAddress GetAliasRegionStart() const {
-        return m_alias_region_start;
-    }
-    constexpr KProcessAddress GetAliasRegionEnd() const {
-        return m_alias_region_end;
-    }
-    constexpr size_t GetAliasRegionSize() const {
-        return m_alias_region_end - m_alias_region_start;
-    }
-    constexpr KProcessAddress GetStackRegionStart() const {
-        return m_stack_region_start;
-    }
-    constexpr KProcessAddress GetStackRegionEnd() const {
-        return m_stack_region_end;
-    }
-    constexpr size_t GetStackRegionSize() const {
-        return m_stack_region_end - m_stack_region_start;
-    }
-    constexpr KProcessAddress GetKernelMapRegionStart() const {
-        return m_kernel_map_region_start;
-    }
-    constexpr KProcessAddress GetKernelMapRegionEnd() const {
-        return m_kernel_map_region_end;
-    }
-    constexpr KProcessAddress GetCodeRegionStart() const {
-        return m_code_region_start;
-    }
-    constexpr KProcessAddress GetCodeRegionEnd() const {
-        return m_code_region_end;
-    }
-    constexpr KProcessAddress GetAliasCodeRegionStart() const {
-        return m_alias_code_region_start;
-    }
-    constexpr KProcessAddress GetAliasCodeRegionEnd() const {
-        return m_alias_code_region_end;
-    }
-    constexpr size_t GetAliasCodeRegionSize() const {
-        return m_alias_code_region_end - m_alias_code_region_start;
-    }
-    size_t GetNormalMemorySize() const {
-        KScopedLightLock lk(m_general_lock);
-        return GetHeapSize() + m_mapped_physical_memory_size;
-    }
-    constexpr size_t GetAddressSpaceWidth() const {
-        return m_address_space_width;
-    }
-    constexpr size_t GetHeapSize() const {
-        return m_current_heap_end - m_heap_region_start;
-    }
-    constexpr size_t GetNumGuardPages() const {
-        return IsKernel() ? 1 : 4;
-    }
-    KPhysicalAddress GetPhysicalAddr(KProcessAddress addr) const {
-        const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits];
-        ASSERT(backing_addr);
-        return backing_addr + GetInteger(addr);
-    }
-    constexpr bool Contains(KProcessAddress addr) const {
-        return m_address_space_start <= addr && addr <= m_address_space_end - 1;
-    }
-    constexpr bool Contains(KProcessAddress addr, size_t size) const {
-        return m_address_space_start <= addr && addr < addr + size &&
-               addr + size - 1 <= m_address_space_end - 1;
-    }
-    constexpr bool IsInAliasRegion(KProcessAddress addr, size_t size) const {
-        return this->Contains(addr, size) && m_alias_region_start <= addr &&
-               addr + size - 1 <= m_alias_region_end - 1;
-    }
-    constexpr bool IsInHeapRegion(KProcessAddress addr, size_t size) const {
-        return this->Contains(addr, size) && m_heap_region_start <= addr &&
-               addr + size - 1 <= m_heap_region_end - 1;
-    }
-
-public:
-    static KVirtualAddress GetLinearMappedVirtualAddress(const KMemoryLayout& layout,
-                                                         KPhysicalAddress addr) {
-        return layout.GetLinearVirtualAddress(addr);
-    }
-
-    static KPhysicalAddress GetLinearMappedPhysicalAddress(const KMemoryLayout& layout,
-                                                           KVirtualAddress addr) {
-        return layout.GetLinearPhysicalAddress(addr);
-    }
-
-    static KVirtualAddress GetHeapVirtualAddress(const KMemoryLayout& layout,
-                                                 KPhysicalAddress addr) {
-        return GetLinearMappedVirtualAddress(layout, addr);
-    }
-
-    static KPhysicalAddress GetHeapPhysicalAddress(const KMemoryLayout& layout,
-                                                   KVirtualAddress addr) {
-        return GetLinearMappedPhysicalAddress(layout, addr);
-    }
-
-    static KVirtualAddress GetPageTableVirtualAddress(const KMemoryLayout& layout,
-                                                      KPhysicalAddress addr) {
-        return GetLinearMappedVirtualAddress(layout, addr);
-    }
-
-    static KPhysicalAddress GetPageTablePhysicalAddress(const KMemoryLayout& layout,
-                                                        KVirtualAddress addr) {
-        return GetLinearMappedPhysicalAddress(layout, addr);
-    }
-
-private:
-    constexpr bool IsKernel() const {
-        return m_is_kernel;
-    }
-    constexpr bool IsAslrEnabled() const {
-        return m_enable_aslr;
-    }
-
-    constexpr bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
-        return (m_address_space_start <= addr) &&
-               (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
-               (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
-    }
-
-private:
-    class KScopedPageTableUpdater {
-    private:
-        KPageTable* m_pt{};
-        PageLinkedList m_ll;
-
-    public:
-        explicit KScopedPageTableUpdater(KPageTable* pt) : m_pt(pt) {}
-        explicit KScopedPageTableUpdater(KPageTable& pt) : KScopedPageTableUpdater(&pt) {}
-        ~KScopedPageTableUpdater() {
-            m_pt->FinalizeUpdate(this->GetPageList());
-        }
-
-        PageLinkedList* GetPageList() {
-            return std::addressof(m_ll);
-        }
-    };
-
-private:
-    KProcessAddress m_address_space_start{};
-    KProcessAddress m_address_space_end{};
-    KProcessAddress m_heap_region_start{};
-    KProcessAddress m_heap_region_end{};
-    KProcessAddress m_current_heap_end{};
-    KProcessAddress m_alias_region_start{};
-    KProcessAddress m_alias_region_end{};
-    KProcessAddress m_stack_region_start{};
-    KProcessAddress m_stack_region_end{};
-    KProcessAddress m_kernel_map_region_start{};
-    KProcessAddress m_kernel_map_region_end{};
-    KProcessAddress m_code_region_start{};
-    KProcessAddress m_code_region_end{};
-    KProcessAddress m_alias_code_region_start{};
-    KProcessAddress m_alias_code_region_end{};
-
-    size_t m_max_heap_size{};
-    size_t m_mapped_physical_memory_size{};
-    size_t m_mapped_unsafe_physical_memory{};
-    size_t m_mapped_insecure_memory{};
-    size_t m_mapped_ipc_server_memory{};
-    size_t m_address_space_width{};
-
-    KMemoryBlockManager m_memory_block_manager;
-    u32 m_allocate_option{};
-
-    bool m_is_kernel{};
-    bool m_enable_aslr{};
-    bool m_enable_device_address_space_merge{};
-
-    KMemoryBlockSlabManager* m_memory_block_slab_manager{};
-    KBlockInfoManager* m_block_info_manager{};
-    KResourceLimit* m_resource_limit{};
-
-    u32 m_heap_fill_value{};
-    u32 m_ipc_fill_value{};
-    u32 m_stack_fill_value{};
-    const KMemoryRegion* m_cached_physical_heap_region{};
-
-    KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application};
-    KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront};
-
-    std::unique_ptr<Common::PageTable> m_page_table_impl;
-
-    Core::System& m_system;
-    KernelCore& m_kernel;
-    Core::Memory::Memory* m_memory{};
+    explicit KPageTable(KernelCore& kernel) : KPageTableBase(kernel) {}
+    ~KPageTable() = default;
 };
 
 } // namespace Kernel
5716
src/core/hle/kernel/k_page_table_base.cpp
Normal file
File diff suppressed because it is too large
759
src/core/hle/kernel/k_page_table_base.h
Normal file
@@ -0,0 +1,759 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <memory>
+
+#include "common/common_funcs.h"
+#include "common/page_table.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
+#include "core/hle/kernel/k_light_lock.h"
+#include "core/hle/kernel/k_memory_block.h"
+#include "core/hle/kernel/k_memory_block_manager.h"
+#include "core/hle/kernel/k_memory_layout.h"
+#include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_typed_address.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/result.h"
+#include "core/memory.h"
+
+namespace Kernel {
+
+enum class DisableMergeAttribute : u8 {
+    None = (0U << 0),
+
+    DisableHead = (1U << 0),
+    DisableHeadAndBody = (1U << 1),
+    EnableHeadAndBody = (1U << 2),
+    DisableTail = (1U << 3),
+    EnableTail = (1U << 4),
+    EnableAndMergeHeadBodyTail = (1U << 5),
+
+    EnableHeadBodyTail = EnableHeadAndBody | EnableTail,
+    DisableHeadBodyTail = DisableHeadAndBody | DisableTail,
+};
+DECLARE_ENUM_FLAG_OPERATORS(DisableMergeAttribute);
+
+struct KPageProperties {
+    KMemoryPermission perm;
+    bool io;
+    bool uncached;
+    DisableMergeAttribute disable_merge_attributes;
+};
+static_assert(std::is_trivial_v<KPageProperties>);
+static_assert(sizeof(KPageProperties) == sizeof(u32));
+
+class KResourceLimit;
+class KSystemResource;
+
+class KPageTableBase {
+    YUZU_NON_COPYABLE(KPageTableBase);
+    YUZU_NON_MOVEABLE(KPageTableBase);
+
+public:
+    using TraversalEntry = Common::PageTable::TraversalEntry;
+    using TraversalContext = Common::PageTable::TraversalContext;
+
+    class MemoryRange {
+    private:
+        KernelCore& m_kernel;
+        KPhysicalAddress m_address;
+        size_t m_size;
+        bool m_heap;
+
+    public:
+        explicit MemoryRange(KernelCore& kernel)
+            : m_kernel(kernel), m_address(0), m_size(0), m_heap(false) {}
+
+        void Set(KPhysicalAddress address, size_t size, bool heap) {
+            m_address = address;
+            m_size = size;
+            m_heap = heap;
+        }
+
+        KPhysicalAddress GetAddress() const {
+            return m_address;
+        }
+        size_t GetSize() const {
+            return m_size;
+        }
+        bool IsHeap() const {
+            return m_heap;
+        }
+
+        void Open();
+        void Close();
+    };
+
+protected:
+    enum MemoryFillValue : u8 {
+        MemoryFillValue_Zero = 0,
+        MemoryFillValue_Stack = 'X',
+        MemoryFillValue_Ipc = 'Y',
+        MemoryFillValue_Heap = 'Z',
+    };
+
+    enum class OperationType {
+        Map = 0,
+        MapGroup = 1,
+        MapFirstGroup = 2,
+        Unmap = 3,
+        ChangePermissions = 4,
+        ChangePermissionsAndRefresh = 5,
+        ChangePermissionsAndRefreshAndFlush = 6,
+        Separate = 7,
+    };
+
+    static constexpr size_t MaxPhysicalMapAlignment = 1_GiB;
+    static constexpr size_t RegionAlignment = 2_MiB;
+    static_assert(RegionAlignment == KernelAslrAlignment);
+
+    struct PageLinkedList {
+    private:
+        struct Node {
+            Node* m_next;
+            std::array<u8, PageSize - sizeof(Node*)> m_buffer;
+        };
+        static_assert(std::is_trivial_v<Node>);
+
+    private:
+        Node* m_root{};
+
+    public:
+        constexpr PageLinkedList() : m_root(nullptr) {}
+
+        void Push(Node* n) {
+            ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize));
+            n->m_next = m_root;
+            m_root = n;
+        }
+
+        Node* Peek() const {
+            return m_root;
+        }
+
+        Node* Pop() {
+            Node* const r = m_root;
+
+            m_root = r->m_next;
+            r->m_next = nullptr;
+
+            return r;
+        }
+    };
+    static_assert(std::is_trivially_destructible_v<PageLinkedList>);
+
+    static constexpr auto DefaultMemoryIgnoreAttr =
+        KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
+
+    static constexpr size_t GetAddressSpaceWidth(Svc::CreateProcessFlag as_type) {
+        switch (static_cast<Svc::CreateProcessFlag>(as_type &
+                                                    Svc::CreateProcessFlag::AddressSpaceMask)) {
+        case Svc::CreateProcessFlag::AddressSpace64Bit:
+            return 39;
+        case Svc::CreateProcessFlag::AddressSpace64BitDeprecated:
+            return 36;
+        case Svc::CreateProcessFlag::AddressSpace32Bit:
+        case Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias:
+            return 32;
+        default:
+            UNREACHABLE();
+        }
+    }
+
+private:
+    class KScopedPageTableUpdater {
+    private:
+        KPageTableBase* m_pt;
+        PageLinkedList m_ll;
+
+    public:
+        explicit KScopedPageTableUpdater(KPageTableBase* pt) : m_pt(pt), m_ll() {}
+        explicit KScopedPageTableUpdater(KPageTableBase& pt)
+            : KScopedPageTableUpdater(std::addressof(pt)) {}
+        ~KScopedPageTableUpdater() {
+            m_pt->FinalizeUpdate(this->GetPageList());
+        }
+
+        PageLinkedList* GetPageList() {
+            return std::addressof(m_ll);
+        }
+    };
+
+private:
+    KernelCore& m_kernel;
+    Core::System& m_system;
+    KProcessAddress m_address_space_start{};
+    KProcessAddress m_address_space_end{};
+    KProcessAddress m_heap_region_start{};
+    KProcessAddress m_heap_region_end{};
+    KProcessAddress m_current_heap_end{};
+    KProcessAddress m_alias_region_start{};
+    KProcessAddress m_alias_region_end{};
+    KProcessAddress m_stack_region_start{};
+    KProcessAddress m_stack_region_end{};
+    KProcessAddress m_kernel_map_region_start{};
+    KProcessAddress m_kernel_map_region_end{};
+    KProcessAddress m_alias_code_region_start{};
+    KProcessAddress m_alias_code_region_end{};
+    KProcessAddress m_code_region_start{};
+    KProcessAddress m_code_region_end{};
+    size_t m_max_heap_size{};
+    size_t m_mapped_physical_memory_size{};
+    size_t m_mapped_unsafe_physical_memory{};
+    size_t m_mapped_insecure_memory{};
+    size_t m_mapped_ipc_server_memory{};
+    mutable KLightLock m_general_lock;
+    mutable KLightLock m_map_physical_memory_lock;
+    KLightLock m_device_map_lock;
+    std::unique_ptr<Common::PageTable> m_impl{};
+    Core::Memory::Memory* m_memory{};
+    KMemoryBlockManager m_memory_block_manager{};
+    u32 m_allocate_option{};
+    u32 m_address_space_width{};
+    bool m_is_kernel{};
+    bool m_enable_aslr{};
+    bool m_enable_device_address_space_merge{};
+    KMemoryBlockSlabManager* m_memory_block_slab_manager{};
+    KBlockInfoManager* m_block_info_manager{};
+    KResourceLimit* m_resource_limit{};
+    const KMemoryRegion* m_cached_physical_linear_region{};
+    const KMemoryRegion* m_cached_physical_heap_region{};
+    MemoryFillValue m_heap_fill_value{};
+    MemoryFillValue m_ipc_fill_value{};
+    MemoryFillValue m_stack_fill_value{};
+
+public:
+    explicit KPageTableBase(KernelCore& kernel);
+    ~KPageTableBase();
+
+    Result InitializeForKernel(bool is_64_bit, KVirtualAddress start, KVirtualAddress end,
+                               Core::Memory::Memory& memory);
+    Result InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
+                                bool enable_device_address_space_merge, bool from_back,
+                                KMemoryManager::Pool pool, KProcessAddress code_address,
+                                size_t code_size, KSystemResource* system_resource,
+                                KResourceLimit* resource_limit, Core::Memory::Memory& memory);
+
+    void Finalize();
+
+    bool IsKernel() const {
+        return m_is_kernel;
+    }
+    bool IsAslrEnabled() const {
+        return m_enable_aslr;
+    }
+
+    bool Contains(KProcessAddress addr) const {
+        return m_address_space_start <= addr && addr <= m_address_space_end - 1;
+    }
+
+    bool Contains(KProcessAddress addr, size_t size) const {
+        return m_address_space_start <= addr && addr < addr + size &&
+               addr + size - 1 <= m_address_space_end - 1;
+    }
+
+    bool IsInAliasRegion(KProcessAddress addr, size_t size) const {
+        return this->Contains(addr, size) && m_alias_region_start <= addr &&
+               addr + size - 1 <= m_alias_region_end - 1;
+    }
+
+    bool IsInHeapRegion(KProcessAddress addr, size_t size) const {
+        return this->Contains(addr, size) && m_heap_region_start <= addr &&
+               addr + size - 1 <= m_heap_region_end - 1;
+    }
+
+    bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const {
+        // Even though Unsafe physical memory is KMemoryState_Normal, it must be mapped inside the
+        // alias code region.
+        return this->CanContain(addr, size, Svc::MemoryState::AliasCode);
+    }
+
+    KScopedLightLock AcquireDeviceMapLock() {
+        return KScopedLightLock(m_device_map_lock);
+    }
+
+    KProcessAddress GetRegionAddress(Svc::MemoryState state) const;
+    size_t GetRegionSize(Svc::MemoryState state) const;
+    bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const;
+
+    KProcessAddress GetRegionAddress(KMemoryState state) const {
+        return this->GetRegionAddress(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
+    }
+    size_t GetRegionSize(KMemoryState state) const {
+        return this->GetRegionSize(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
+    }
+    bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
+        return this->CanContain(addr, size,
+                                static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
+    }
+
+public:
+    Core::Memory::Memory& GetMemory() {
+        return *m_memory;
+    }
+
+    Core::Memory::Memory& GetMemory() const {
+        return *m_memory;
+    }
+
+    Common::PageTable& GetImpl() {
+        return *m_impl;
+    }
+
+    Common::PageTable& GetImpl() const {
+        return *m_impl;
+    }
+
+    size_t GetNumGuardPages() const {
+        return this->IsKernel() ? 1 : 4;
+    }
+
+protected:
+    // NOTE: These three functions (Operate, Operate, FinalizeUpdate) are virtual functions
+    // in Nintendo's kernel. We devirtualize them, since KPageTable is the only derived
+    // class, and this avoids unnecessary virtual function calls.
+    Result Operate(PageLinkedList* page_list, KProcessAddress virt_addr, size_t num_pages,
+                   KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties,
+                   OperationType operation, bool reuse_ll);
+    Result Operate(PageLinkedList* page_list, KProcessAddress virt_addr, size_t num_pages,
+                   const KPageGroup& page_group, const KPageProperties properties,
+                   OperationType operation, bool reuse_ll);
+    void FinalizeUpdate(PageLinkedList* page_list);
+
+    bool IsLockedByCurrentThread() const {
+        return m_general_lock.IsLockedByCurrentThread();
+    }
+
+    bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr) {
+        ASSERT(this->IsLockedByCurrentThread());
+
+        return m_kernel.MemoryLayout().IsLinearMappedPhysicalAddress(
+            m_cached_physical_linear_region, phys_addr);
+    }
+
+    bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
+        ASSERT(this->IsLockedByCurrentThread());
+
+        return m_kernel.MemoryLayout().IsLinearMappedPhysicalAddress(
+            m_cached_physical_linear_region, phys_addr, size);
+    }
+
+    bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) {
+        ASSERT(this->IsLockedByCurrentThread());
+
+        return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region,
+                                                             phys_addr);
+    }
+
+    bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
+        ASSERT(this->IsLockedByCurrentThread());
+
+        return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region,
+                                                             phys_addr, size);
+    }
+
+    bool IsHeapPhysicalAddressForFinalize(KPhysicalAddress phys_addr) {
+        ASSERT(!this->IsLockedByCurrentThread());
+
+        return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region,
+                                                             phys_addr);
+    }
+
+    bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
+        return (m_address_space_start <= addr) &&
+               (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
+               (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
+    }
+
+private:
+    KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
+                                 size_t num_pages, size_t alignment, size_t offset,
+                                 size_t guard_pages) const;
+
+    Result CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
|
||||||
|
KMemoryState state_mask, KMemoryState state,
|
||||||
|
KMemoryPermission perm_mask, KMemoryPermission perm,
|
||||||
|
KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
|
||||||
|
Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, KMemoryState state_mask,
|
||||||
|
KMemoryState state, KMemoryPermission perm_mask,
|
||||||
|
KMemoryPermission perm, KMemoryAttribute attr_mask,
|
||||||
|
KMemoryAttribute attr) const {
|
||||||
|
R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
|
||||||
|
perm, attr_mask, attr));
|
||||||
|
}
|
||||||
|
|
||||||
|
Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
|
||||||
|
KMemoryPermission perm_mask, KMemoryPermission perm,
|
||||||
|
KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
|
||||||
|
Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
|
||||||
|
KMemoryAttribute* out_attr, size_t* out_blocks_needed,
|
||||||
|
KMemoryBlockManager::const_iterator it, KProcessAddress last_addr,
|
||||||
|
KMemoryState state_mask, KMemoryState state,
|
||||||
|
KMemoryPermission perm_mask, KMemoryPermission perm,
|
||||||
|
KMemoryAttribute attr_mask, KMemoryAttribute attr,
|
||||||
|
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
|
||||||
|
Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
|
||||||
|
KMemoryAttribute* out_attr, size_t* out_blocks_needed,
|
||||||
|
KProcessAddress addr, size_t size, KMemoryState state_mask,
|
||||||
|
KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
|
||||||
|
KMemoryAttribute attr_mask, KMemoryAttribute attr,
|
||||||
|
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
|
||||||
|
Result CheckMemoryState(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
|
||||||
|
KMemoryState state_mask, KMemoryState state,
|
||||||
|
KMemoryPermission perm_mask, KMemoryPermission perm,
|
||||||
|
KMemoryAttribute attr_mask, KMemoryAttribute attr,
|
||||||
|
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
|
||||||
|
R_RETURN(this->CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
|
||||||
|
state_mask, state, perm_mask, perm, attr_mask, attr,
|
||||||
|
ignore_attr));
|
||||||
|
}
|
||||||
|
Result CheckMemoryState(KProcessAddress addr, size_t size, KMemoryState state_mask,
|
||||||
|
KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
|
||||||
|
KMemoryAttribute attr_mask, KMemoryAttribute attr,
|
||||||
|
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
|
||||||
|
R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
|
||||||
|
attr_mask, attr, ignore_attr));
|
||||||
|
}
|
||||||
|
|
||||||
|
Result LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_paddr, KProcessAddress addr,
|
||||||
|
size_t size, KMemoryState state_mask, KMemoryState state,
|
||||||
|
KMemoryPermission perm_mask, KMemoryPermission perm,
|
||||||
|
KMemoryAttribute attr_mask, KMemoryAttribute attr,
|
||||||
|
KMemoryPermission new_perm, KMemoryAttribute lock_attr);
|
||||||
|
Result UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
|
||||||
|
KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
|
||||||
|
KMemoryAttribute attr_mask, KMemoryAttribute attr,
|
||||||
|
KMemoryPermission new_perm, KMemoryAttribute lock_attr,
|
||||||
|
const KPageGroup* pg);
|
||||||
|
|
||||||
|
Result QueryInfoImpl(KMemoryInfo* out_info, Svc::PageInfo* out_page,
|
||||||
|
KProcessAddress address) const;
|
||||||
|
|
||||||
|
Result QueryMappingImpl(KProcessAddress* out, KPhysicalAddress address, size_t size,
|
||||||
|
Svc::MemoryState state) const;
|
||||||
|
|
||||||
|
Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
|
||||||
|
size_t num_pages, KMemoryPermission perm);
|
||||||
|
Result MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
|
||||||
|
const KPageGroup& pg, const KPageProperties properties, bool reuse_ll);
|
||||||
|
|
||||||
|
void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
|
||||||
|
const KPageGroup& pg);
|
||||||
|
|
||||||
|
Result MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages);
|
||||||
|
bool IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages);
|
||||||
|
|
||||||
|
Result GetContiguousMemoryRangeWithState(MemoryRange* out, KProcessAddress address, size_t size,
|
||||||
|
KMemoryState state_mask, KMemoryState state,
|
||||||
|
KMemoryPermission perm_mask, KMemoryPermission perm,
|
||||||
|
KMemoryAttribute attr_mask, KMemoryAttribute attr);
|
||||||
|
|
||||||
|
Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
|
||||||
|
KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start,
|
||||||
|
size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
|
||||||
|
|
||||||
|
Result MapIoImpl(KProcessAddress* out, PageLinkedList* page_list, KPhysicalAddress phys_addr,
|
||||||
|
size_t size, KMemoryState state, KMemoryPermission perm);
|
||||||
|
Result ReadIoMemoryImpl(KProcessAddress dst_addr, KPhysicalAddress phys_addr, size_t size,
|
||||||
|
KMemoryState state);
|
||||||
|
Result WriteIoMemoryImpl(KPhysicalAddress phys_addr, KProcessAddress src_addr, size_t size,
|
||||||
|
KMemoryState state);
|
||||||
|
|
||||||
|
Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
|
||||||
|
KProcessAddress address, size_t size, KMemoryPermission test_perm,
|
||||||
|
KMemoryState dst_state);
|
||||||
|
Result SetupForIpcServer(KProcessAddress* out_addr, size_t size, KProcessAddress src_addr,
|
||||||
|
KMemoryPermission test_perm, KMemoryState dst_state,
|
||||||
|
KPageTableBase& src_page_table, bool send);
|
||||||
|
void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, KProcessAddress address,
|
||||||
|
size_t size, KMemoryPermission prot_perm);
|
||||||
|
|
||||||
|
size_t GetSize(KMemoryState state) const;
|
||||||
|
|
||||||
|
bool GetPhysicalAddressLocked(KPhysicalAddress* out, KProcessAddress virt_addr) const {
|
||||||
|
// Validate pre-conditions.
|
||||||
|
ASSERT(this->IsLockedByCurrentThread());
|
||||||
|
|
||||||
|
return this->GetImpl().GetPhysicalAddress(out, virt_addr);
|
||||||
|
}
|
||||||
|
|
||||||
|
public:
|
||||||
|
bool GetPhysicalAddress(KPhysicalAddress* out, KProcessAddress virt_addr) const {
|
||||||
|
// Validate pre-conditions.
|
||||||
|
ASSERT(!this->IsLockedByCurrentThread());
|
||||||
|
|
||||||
|
// Acquire exclusive access to the table while doing address translation.
|
||||||
|
KScopedLightLock lk(m_general_lock);
|
||||||
|
|
||||||
|
return this->GetPhysicalAddressLocked(out, virt_addr);
|
||||||
|
}
|
||||||
|
|
||||||
|
KBlockInfoManager* GetBlockInfoManager() const {
|
||||||
|
return m_block_info_manager;
|
||||||
|
}
|
||||||
|
|
||||||
|
Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm);
|
||||||
|
Result SetProcessMemoryPermission(KProcessAddress addr, size_t size,
|
||||||
|
Svc::MemoryPermission perm);
|
||||||
|
Result SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask,
|
||||||
|
KMemoryAttribute attr);
|
||||||
|
Result SetHeapSize(KProcessAddress* out, size_t size);
|
||||||
|
Result SetMaxHeapSize(size_t size);
|
||||||
|
Result QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info,
|
||||||
|
KProcessAddress addr) const;
|
||||||
|
Result QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out, KProcessAddress address) const;
|
||||||
|
Result QueryStaticMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) const {
|
||||||
|
R_RETURN(this->QueryMappingImpl(out, address, size, Svc::MemoryState::Static));
|
||||||
|
}
|
||||||
|
Result QueryIoMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) const {
|
||||||
|
R_RETURN(this->QueryMappingImpl(out, address, size, Svc::MemoryState::Io));
|
||||||
|
}
|
||||||
|
Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
|
||||||
|
Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
|
||||||
|
Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
|
||||||
|
Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
|
||||||
|
Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
|
||||||
|
Result MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size,
|
||||||
|
Svc::MemoryMapping mapping, Svc::MemoryPermission perm);
|
||||||
|
Result UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size,
|
||||||
|
Svc::MemoryMapping mapping);
|
||||||
|
Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
|
||||||
|
Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm);
|
||||||
|
Result MapInsecureMemory(KProcessAddress address, size_t size);
|
||||||
|
Result UnmapInsecureMemory(KProcessAddress address, size_t size);
|
||||||
|
|
||||||
|
Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
|
||||||
|
KPhysicalAddress phys_addr, KProcessAddress region_start,
|
||||||
|
size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
|
||||||
|
R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start,
|
||||||
|
region_num_pages, state, perm));
|
||||||
|
}
|
||||||
|
|
||||||
|
Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
|
||||||
|
KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
|
||||||
|
R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
|
||||||
|
this->GetRegionAddress(state),
|
||||||
|
this->GetRegionSize(state) / PageSize, state, perm));
|
||||||
|
}
|
||||||
|
|
||||||
|
Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state,
|
||||||
|
KMemoryPermission perm) {
|
||||||
|
R_RETURN(this->MapPages(out_addr, num_pages, PageSize, 0, false,
|
||||||
|
this->GetRegionAddress(state),
|
||||||
|
this->GetRegionSize(state) / PageSize, state, perm));
|
||||||
|
}
|
||||||
|
|
||||||
|
Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
|
||||||
|
KMemoryPermission perm);
|
||||||
|
Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state);
|
||||||
|
|
||||||
|
Result MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
|
||||||
|
KProcessAddress region_start, size_t region_num_pages, KMemoryState state,
|
||||||
|
KMemoryPermission perm);
|
||||||
|
Result MapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state,
|
||||||
|
KMemoryPermission perm);
|
||||||
|
Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state);
|
||||||
|
|
||||||
|
Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
|
||||||
|
KMemoryState state_mask, KMemoryState state,
|
||||||
|
KMemoryPermission perm_mask, KMemoryPermission perm,
|
||||||
|
KMemoryAttribute attr_mask, KMemoryAttribute attr);
|
||||||
|
|
||||||
|
Result InvalidateProcessDataCache(KProcessAddress address, size_t size);
|
||||||
|
Result InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size);
|
||||||
|
|
||||||
|
Result ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
|
||||||
|
Result ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
|
||||||
|
KMemoryState state);
|
||||||
|
|
||||||
|
Result WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
|
||||||
|
Result WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
|
||||||
|
KMemoryState state);
|
||||||
|
|
||||||
|
Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size,
|
||||||
|
KMemoryPermission perm, bool is_aligned, bool check_heap);
|
||||||
|
Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap);
|
||||||
|
|
||||||
|
Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size);
|
||||||
|
Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size);
|
||||||
|
|
||||||
|
Result OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out,
|
||||||
|
KProcessAddress address, size_t size,
|
||||||
|
KMemoryPermission perm, bool is_aligned);
|
||||||
|
Result OpenMemoryRangeForUnmapDeviceAddressSpace(MemoryRange* out, KProcessAddress address,
|
||||||
|
size_t size);
|
||||||
|
|
||||||
|
Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size);
|
||||||
|
Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size);
|
||||||
|
|
||||||
|
Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
|
||||||
|
KMemoryPermission perm);
|
||||||
|
Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg);
|
||||||
|
Result LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size);
|
||||||
|
Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup& pg);
|
||||||
|
|
||||||
|
Result OpenMemoryRangeForProcessCacheOperation(MemoryRange* out, KProcessAddress address,
|
||||||
|
size_t size);
|
||||||
|
|
||||||
|
Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size,
|
||||||
|
KProcessAddress src_addr, KMemoryState src_state_mask,
|
||||||
|
KMemoryState src_state, KMemoryPermission src_test_perm,
|
||||||
|
KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr);
|
||||||
|
Result CopyMemoryFromLinearToKernel(void* buffer, size_t size, KProcessAddress src_addr,
|
||||||
|
KMemoryState src_state_mask, KMemoryState src_state,
|
||||||
|
KMemoryPermission src_test_perm,
|
||||||
|
KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr);
|
||||||
|
Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size,
|
||||||
|
KMemoryState dst_state_mask, KMemoryState dst_state,
|
||||||
|
KMemoryPermission dst_test_perm,
|
||||||
|
KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
|
||||||
|
KProcessAddress src_addr);
|
||||||
|
Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size,
|
||||||
|
KMemoryState dst_state_mask, KMemoryState dst_state,
|
||||||
|
KMemoryPermission dst_test_perm,
|
||||||
|
KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
|
||||||
|
void* buffer);
|
||||||
|
Result CopyMemoryFromHeapToHeap(KPageTableBase& dst_page_table, KProcessAddress dst_addr,
|
||||||
|
size_t size, KMemoryState dst_state_mask,
|
||||||
|
KMemoryState dst_state, KMemoryPermission dst_test_perm,
|
||||||
|
KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
|
||||||
|
KProcessAddress src_addr, KMemoryState src_state_mask,
|
||||||
|
KMemoryState src_state, KMemoryPermission src_test_perm,
|
||||||
|
KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr);
|
||||||
|
Result CopyMemoryFromHeapToHeapWithoutCheckDestination(
|
||||||
|
KPageTableBase& dst_page_table, KProcessAddress dst_addr, size_t size,
|
||||||
|
KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm,
|
||||||
|
KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr,
|
||||||
|
KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm,
|
||||||
|
KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr);
|
||||||
|
|
||||||
|
Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
|
||||||
|
KPageTableBase& src_page_table, KMemoryPermission test_perm,
|
||||||
|
KMemoryState dst_state, bool send);
|
||||||
|
Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state);
|
||||||
|
Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state);
|
||||||
|
|
||||||
|
Result MapPhysicalMemory(KProcessAddress address, size_t size);
|
||||||
|
Result UnmapPhysicalMemory(KProcessAddress address, size_t size);
|
||||||
|
|
||||||
|
Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size);
|
||||||
|
Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size);
|
||||||
|
|
||||||
|
Result UnmapProcessMemory(KProcessAddress dst_address, size_t size, KPageTableBase& src_pt,
|
||||||
|
KProcessAddress src_address);
|
||||||
|
|
||||||
|
public:
|
||||||
|
KProcessAddress GetAddressSpaceStart() const {
|
||||||
|
return m_address_space_start;
|
||||||
|
}
|
||||||
|
KProcessAddress GetHeapRegionStart() const {
|
||||||
|
return m_heap_region_start;
|
||||||
|
}
|
||||||
|
KProcessAddress GetAliasRegionStart() const {
|
||||||
|
return m_alias_region_start;
|
||||||
|
}
|
||||||
|
KProcessAddress GetStackRegionStart() const {
|
||||||
|
return m_stack_region_start;
|
||||||
|
}
|
||||||
|
KProcessAddress GetKernelMapRegionStart() const {
|
||||||
|
return m_kernel_map_region_start;
|
||||||
|
}
|
||||||
|
KProcessAddress GetCodeRegionStart() const {
|
||||||
|
return m_code_region_start;
|
||||||
|
}
|
||||||
|
KProcessAddress GetAliasCodeRegionStart() const {
|
||||||
|
return m_alias_code_region_start;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t GetAddressSpaceSize() const {
|
||||||
|
return m_address_space_end - m_address_space_start;
|
||||||
|
}
|
||||||
|
size_t GetHeapRegionSize() const {
|
||||||
|
return m_heap_region_end - m_heap_region_start;
|
||||||
|
}
|
||||||
|
size_t GetAliasRegionSize() const {
|
||||||
|
return m_alias_region_end - m_alias_region_start;
|
||||||
|
}
|
||||||
|
size_t GetStackRegionSize() const {
|
||||||
|
return m_stack_region_end - m_stack_region_start;
|
||||||
|
}
|
||||||
|
size_t GetKernelMapRegionSize() const {
|
||||||
|
return m_kernel_map_region_end - m_kernel_map_region_start;
|
||||||
|
}
|
||||||
|
size_t GetCodeRegionSize() const {
|
||||||
|
return m_code_region_end - m_code_region_start;
|
||||||
|
}
|
||||||
|
size_t GetAliasCodeRegionSize() const {
|
||||||
|
return m_alias_code_region_end - m_alias_code_region_start;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t GetNormalMemorySize() const {
|
||||||
|
// Lock the table.
|
||||||
|
KScopedLightLock lk(m_general_lock);
|
||||||
|
|
||||||
|
return (m_current_heap_end - m_heap_region_start) + m_mapped_physical_memory_size;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t GetCodeSize() const;
|
||||||
|
size_t GetCodeDataSize() const;
|
||||||
|
size_t GetAliasCodeSize() const;
|
||||||
|
size_t GetAliasCodeDataSize() const;
|
||||||
|
|
||||||
|
u32 GetAllocateOption() const {
|
||||||
|
return m_allocate_option;
|
||||||
|
}
|
||||||
|
|
||||||
|
u32 GetAddressSpaceWidth() const {
|
||||||
|
return m_address_space_width;
|
||||||
|
}
|
||||||
|
|
||||||
|
public:
|
||||||
|
// Linear mapped
|
||||||
|
static u8* GetLinearMappedVirtualPointer(KernelCore& kernel, KPhysicalAddress addr) {
|
||||||
|
return kernel.System().DeviceMemory().GetPointer<u8>(addr);
|
||||||
|
}
|
||||||
|
|
||||||
|
static KPhysicalAddress GetLinearMappedPhysicalAddress(KernelCore& kernel,
|
||||||
|
KVirtualAddress addr) {
|
||||||
|
return kernel.MemoryLayout().GetLinearPhysicalAddress(addr);
|
||||||
|
}
|
||||||
|
|
||||||
|
static KVirtualAddress GetLinearMappedVirtualAddress(KernelCore& kernel,
|
||||||
|
KPhysicalAddress addr) {
|
||||||
|
return kernel.MemoryLayout().GetLinearVirtualAddress(addr);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Heap
|
||||||
|
static u8* GetHeapVirtualPointer(KernelCore& kernel, KPhysicalAddress addr) {
|
||||||
|
return kernel.System().DeviceMemory().GetPointer<u8>(addr);
|
||||||
|
}
|
||||||
|
|
||||||
|
static KPhysicalAddress GetHeapPhysicalAddress(KernelCore& kernel, KVirtualAddress addr) {
|
||||||
|
return GetLinearMappedPhysicalAddress(kernel, addr);
|
||||||
|
}
|
||||||
|
|
||||||
|
static KVirtualAddress GetHeapVirtualAddress(KernelCore& kernel, KPhysicalAddress addr) {
|
||||||
|
return GetLinearMappedVirtualAddress(kernel, addr);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Member heap
|
||||||
|
u8* GetHeapVirtualPointer(KPhysicalAddress addr) {
|
||||||
|
return GetHeapVirtualPointer(m_kernel, addr);
|
||||||
|
}
|
||||||
|
|
||||||
|
KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress addr) {
|
||||||
|
return GetHeapPhysicalAddress(m_kernel, addr);
|
||||||
|
}
|
||||||
|
|
||||||
|
KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress addr) {
|
||||||
|
return GetHeapVirtualAddress(m_kernel, addr);
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: GetPageTableVirtualAddress
|
||||||
|
// TODO: GetPageTablePhysicalAddress
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace Kernel
|
||||||
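The containment checks in the class above compare last bytes (`addr + size - 1 <= m_..._end - 1`) rather than one-past-the-end values, and reject `size == 0` and wrap-around via `addr < addr + size`. A minimal standalone sketch of the same check with plain unsigned integers (hypothetical values, outside the kernel's address types):

#include <cstdint>
#include <cstdio>

// End-inclusive range check in the style of Contains()/IsInAliasRegion() above.
bool contains(std::uint64_t region_start, std::uint64_t region_end, std::uint64_t addr,
              std::uint64_t size) {
    // addr < addr + size rejects size == 0 and unsigned wrap-around of addr + size;
    // the final comparison works on last bytes, so no one-past-the-end value is formed.
    return region_start <= addr && addr < addr + size && addr + size - 1 <= region_end - 1;
}

int main() {
    std::printf("%d\n", contains(0x8000000, 0x10000000, 0x8001000, 0x2000)); // prints 1
    std::printf("%d\n", contains(0x8000000, 0x10000000, 0xFFFF000, 0x2000)); // prints 0
    return 0;
}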
@@ -298,9 +298,9 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params, const KPa
        const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr);
        const bool enable_das_merge =
            False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge);
-       R_TRY(m_page_table.InitializeForProcess(
-           as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address,
-           params.code_num_pages * PageSize, m_system_resource, res_limit, this->GetMemory()));
+       R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool,
+           params.code_address, params.code_num_pages * PageSize,
+           m_system_resource, res_limit, this->GetMemory()));
    }
    ON_RESULT_FAILURE_2 {
        m_page_table.Finalize();
@@ -391,9 +391,9 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params,
        const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr);
        const bool enable_das_merge =
            False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge);
-       R_TRY(m_page_table.InitializeForProcess(as_type, enable_aslr, enable_das_merge,
-           !enable_aslr, pool, params.code_address, code_size,
-           m_system_resource, res_limit, this->GetMemory()));
+       R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool,
+           params.code_address, code_size, m_system_resource, res_limit,
+           this->GetMemory()));
    }
    ON_RESULT_FAILURE_2 {
        m_page_table.Finalize();
@@ -1122,9 +1122,9 @@ Result KProcess::GetThreadList(s32* out_num_threads, KProcessAddress out_thread_
void KProcess::Switch(KProcess* cur_process, KProcess* next_process) {}

KProcess::KProcess(KernelCore& kernel)
-    : KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel.System()},
-      m_state_lock{kernel}, m_list_lock{kernel}, m_cond_var{kernel.System()},
-      m_address_arbiter{kernel.System()}, m_handle_table{kernel} {}
+    : KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel}, m_state_lock{kernel},
+      m_list_lock{kernel}, m_cond_var{kernel.System()}, m_address_arbiter{kernel.System()},
+      m_handle_table{kernel} {}

KProcess::~KProcess() = default;

Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
@@ -5,13 +5,14 @@

#include <map>

+#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_capabilities.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_handle_table.h"
-#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_page_table_manager.h"
+#include "core/hle/kernel/k_process_page_table.h"
#include "core/hle/kernel/k_system_resource.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_local_page.h"
@@ -65,7 +66,7 @@ private:
    using TLPIterator = TLPTree::iterator;

private:
-   KPageTable m_page_table;
+   KProcessPageTable m_page_table;
    std::atomic<size_t> m_used_kernel_memory_size{};
    TLPTree m_fully_used_tlp_tree{};
    TLPTree m_partially_used_tlp_tree{};
@@ -254,9 +255,8 @@ public:
        return m_is_hbl;
    }

-   Kernel::KMemoryManager::Direction GetAllocateOption() const {
-       // TODO: property of the KPageTableBase
-       return KMemoryManager::Direction::FromFront;
+   u32 GetAllocateOption() const {
+       return m_page_table.GetAllocateOption();
    }

    ThreadList& GetThreadList() {
@@ -295,10 +295,10 @@ public:
        return m_list_lock;
    }

-   KPageTable& GetPageTable() {
+   KProcessPageTable& GetPageTable() {
        return m_page_table;
    }
-   const KPageTable& GetPageTable() const {
+   const KProcessPageTable& GetPageTable() const {
        return m_page_table;
    }

480
src/core/hle/kernel/k_process_page_table.h
Normal file
@@ -0,0 +1,480 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_scoped_lock.h"
#include "core/hle/kernel/svc_types.h"

namespace Core {
class ARM_Interface;
}

namespace Kernel {

class KProcessPageTable {
private:
    KPageTable m_page_table;

public:
    KProcessPageTable(KernelCore& kernel) : m_page_table(kernel) {}

    Result Initialize(Svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge,
            bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address,
            size_t code_size, KSystemResource* system_resource,
            KResourceLimit* resource_limit, Core::Memory::Memory& memory) {
        R_RETURN(m_page_table.InitializeForProcess(as_type, enable_aslr, enable_das_merge,
            from_back, pool, code_address, code_size,
            system_resource, resource_limit, memory));
    }

    void Finalize() {
        m_page_table.Finalize();
    }

    Core::Memory::Memory& GetMemory() {
        return m_page_table.GetMemory();
    }

    Core::Memory::Memory& GetMemory() const {
        return m_page_table.GetMemory();
    }

    Common::PageTable& GetImpl() {
        return m_page_table.GetImpl();
    }

    Common::PageTable& GetImpl() const {
        return m_page_table.GetImpl();
    }

    size_t GetNumGuardPages() const {
        return m_page_table.GetNumGuardPages();
    }

    KScopedLightLock AcquireDeviceMapLock() {
        return m_page_table.AcquireDeviceMapLock();
    }

    Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm) {
        R_RETURN(m_page_table.SetMemoryPermission(addr, size, perm));
    }

    Result SetProcessMemoryPermission(KProcessAddress addr, size_t size,
            Svc::MemoryPermission perm) {
        R_RETURN(m_page_table.SetProcessMemoryPermission(addr, size, perm));
    }

    Result SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask,
            KMemoryAttribute attr) {
        R_RETURN(m_page_table.SetMemoryAttribute(addr, size, mask, attr));
    }

    Result SetHeapSize(KProcessAddress* out, size_t size) {
        R_RETURN(m_page_table.SetHeapSize(out, size));
    }

    Result SetMaxHeapSize(size_t size) {
        R_RETURN(m_page_table.SetMaxHeapSize(size));
    }

    Result QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info,
            KProcessAddress addr) const {
        R_RETURN(m_page_table.QueryInfo(out_info, out_page_info, addr));
    }

    Result QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out, KProcessAddress address) {
        R_RETURN(m_page_table.QueryPhysicalAddress(out, address));
    }

    Result QueryStaticMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) {
        R_RETURN(m_page_table.QueryStaticMapping(out, address, size));
    }

    Result QueryIoMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) {
        R_RETURN(m_page_table.QueryIoMapping(out, address, size));
    }

    Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
        R_RETURN(m_page_table.MapMemory(dst_address, src_address, size));
    }

    Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
        R_RETURN(m_page_table.UnmapMemory(dst_address, src_address, size));
    }

    Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
        R_RETURN(m_page_table.MapCodeMemory(dst_address, src_address, size));
    }

    Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
        R_RETURN(m_page_table.UnmapCodeMemory(dst_address, src_address, size));
    }

    Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
        R_RETURN(m_page_table.MapIo(phys_addr, size, perm));
    }

    Result MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size,
            Svc::MemoryMapping mapping, Svc::MemoryPermission perm) {
        R_RETURN(m_page_table.MapIoRegion(dst_address, phys_addr, size, mapping, perm));
    }

    Result UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size,
            Svc::MemoryMapping mapping) {
        R_RETURN(m_page_table.UnmapIoRegion(dst_address, phys_addr, size, mapping));
    }

    Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
        R_RETURN(m_page_table.MapStatic(phys_addr, size, perm));
    }

    Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) {
        R_RETURN(m_page_table.MapRegion(region_type, perm));
    }

    Result MapInsecureMemory(KProcessAddress address, size_t size) {
        R_RETURN(m_page_table.MapInsecureMemory(address, size));
    }

    Result UnmapInsecureMemory(KProcessAddress address, size_t size) {
        R_RETURN(m_page_table.UnmapInsecureMemory(address, size));
    }

    Result MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state,
            KMemoryPermission perm) {
        R_RETURN(m_page_table.MapPageGroup(addr, pg, state, perm));
    }

    Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state) {
        R_RETURN(m_page_table.UnmapPageGroup(address, pg, state));
    }

    Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
            KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
        R_RETURN(m_page_table.MapPages(out_addr, num_pages, alignment, phys_addr, state, perm));
    }

    Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state,
            KMemoryPermission perm) {
        R_RETURN(m_page_table.MapPages(out_addr, num_pages, state, perm));
    }

    Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
            KMemoryPermission perm) {
        R_RETURN(m_page_table.MapPages(address, num_pages, state, perm));
    }

    Result UnmapPages(KProcessAddress addr, size_t num_pages, KMemoryState state) {
        R_RETURN(m_page_table.UnmapPages(addr, num_pages, state));
    }

    Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
            KMemoryState state_mask, KMemoryState state,
            KMemoryPermission perm_mask, KMemoryPermission perm,
            KMemoryAttribute attr_mask, KMemoryAttribute attr) {
        R_RETURN(m_page_table.MakeAndOpenPageGroup(out, address, num_pages, state_mask, state,
            perm_mask, perm, attr_mask, attr));
    }

    Result InvalidateProcessDataCache(KProcessAddress address, size_t size) {
        R_RETURN(m_page_table.InvalidateProcessDataCache(address, size));
    }

    Result ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
        R_RETURN(m_page_table.ReadDebugMemory(dst_address, src_address, size));
    }

    Result ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
            KMemoryState state) {
        R_RETURN(m_page_table.ReadDebugIoMemory(dst_address, src_address, size, state));
    }

    Result WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
        R_RETURN(m_page_table.WriteDebugMemory(dst_address, src_address, size));
    }

    Result WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
            KMemoryState state) {
        R_RETURN(m_page_table.WriteDebugIoMemory(dst_address, src_address, size, state));
    }

    Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size,
            KMemoryPermission perm, bool is_aligned, bool check_heap) {
        R_RETURN(m_page_table.LockForMapDeviceAddressSpace(out_is_io, address, size, perm,
            is_aligned, check_heap));
    }

    Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap) {
        R_RETURN(m_page_table.LockForUnmapDeviceAddressSpace(address, size, check_heap));
    }

    Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
        R_RETURN(m_page_table.UnlockForDeviceAddressSpace(address, size));
    }

    Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size) {
        R_RETURN(m_page_table.UnlockForDeviceAddressSpacePartialMap(address, size));
    }

    Result OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out,
            KProcessAddress address, size_t size,
            KMemoryPermission perm, bool is_aligned) {
        R_RETURN(m_page_table.OpenMemoryRangeForMapDeviceAddressSpace(out, address, size, perm,
            is_aligned));
    }

    Result OpenMemoryRangeForUnmapDeviceAddressSpace(KPageTableBase::MemoryRange* out,
            KProcessAddress address, size_t size) {
        R_RETURN(m_page_table.OpenMemoryRangeForUnmapDeviceAddressSpace(out, address, size));
    }

    Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size) {
        R_RETURN(m_page_table.LockForIpcUserBuffer(out, address, size));
    }

    Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
        R_RETURN(m_page_table.UnlockForIpcUserBuffer(address, size));
    }

    Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
            KMemoryPermission perm) {
        R_RETURN(m_page_table.LockForTransferMemory(out, address, size, perm));
    }

    Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg) {
        R_RETURN(m_page_table.UnlockForTransferMemory(address, size, pg));
    }

    Result LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size) {
        R_RETURN(m_page_table.LockForCodeMemory(out, address, size));
    }

    Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup& pg) {
        R_RETURN(m_page_table.UnlockForCodeMemory(address, size, pg));
    }

    Result OpenMemoryRangeForProcessCacheOperation(KPageTableBase::MemoryRange* out,
            KProcessAddress address, size_t size) {
        R_RETURN(m_page_table.OpenMemoryRangeForProcessCacheOperation(out, address, size));
    }

    Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size,
            KProcessAddress src_addr, KMemoryState src_state_mask,
            KMemoryState src_state, KMemoryPermission src_test_perm,
            KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
        R_RETURN(m_page_table.CopyMemoryFromLinearToUser(dst_addr, size, src_addr, src_state_mask,
            src_state, src_test_perm, src_attr_mask,
            src_attr));
    }

    Result CopyMemoryFromLinearToKernel(void* dst_addr, size_t size, KProcessAddress src_addr,
            KMemoryState src_state_mask, KMemoryState src_state,
            KMemoryPermission src_test_perm,
            KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
        R_RETURN(m_page_table.CopyMemoryFromLinearToKernel(dst_addr, size, src_addr, src_state_mask,
            src_state, src_test_perm, src_attr_mask,
            src_attr));
    }

    Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size,
            KMemoryState dst_state_mask, KMemoryState dst_state,
            KMemoryPermission dst_test_perm,
            KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
            KProcessAddress src_addr) {
        R_RETURN(m_page_table.CopyMemoryFromUserToLinear(dst_addr, size, dst_state_mask, dst_state,
            dst_test_perm, dst_attr_mask, dst_attr,
            src_addr));
    }

    Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size,
            KMemoryState dst_state_mask, KMemoryState dst_state,
            KMemoryPermission dst_test_perm,
            KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
            void* src_addr) {
        R_RETURN(m_page_table.CopyMemoryFromKernelToLinear(dst_addr, size, dst_state_mask,
            dst_state, dst_test_perm, dst_attr_mask,
            dst_attr, src_addr));
    }

    Result CopyMemoryFromHeapToHeap(KProcessPageTable& dst_page_table, KProcessAddress dst_addr,
            size_t size, KMemoryState dst_state_mask,
            KMemoryState dst_state, KMemoryPermission dst_test_perm,
            KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
            KProcessAddress src_addr, KMemoryState src_state_mask,
            KMemoryState src_state, KMemoryPermission src_test_perm,
            KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
        R_RETURN(m_page_table.CopyMemoryFromHeapToHeap(
            dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm,
            dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm,
            src_attr_mask, src_attr));
    }

    Result CopyMemoryFromHeapToHeapWithoutCheckDestination(
            KProcessPageTable& dst_page_table, KProcessAddress dst_addr, size_t size,
            KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm,
            KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr,
            KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm,
            KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
        R_RETURN(m_page_table.CopyMemoryFromHeapToHeapWithoutCheckDestination(
            dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm,
            dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm,
            src_attr_mask, src_attr));
    }

    Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
            KProcessPageTable& src_page_table, KMemoryPermission test_perm,
            KMemoryState dst_state, bool send) {
        R_RETURN(m_page_table.SetupForIpc(out_dst_addr, size, src_addr, src_page_table.m_page_table,
            test_perm, dst_state, send));
    }

    Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state) {
        R_RETURN(m_page_table.CleanupForIpcServer(address, size, dst_state));
    }

    Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state) {
        R_RETURN(m_page_table.CleanupForIpcClient(address, size, dst_state));
    }

    Result MapPhysicalMemory(KProcessAddress address, size_t size) {
        R_RETURN(m_page_table.MapPhysicalMemory(address, size));
    }

    Result UnmapPhysicalMemory(KProcessAddress address, size_t size) {
        R_RETURN(m_page_table.UnmapPhysicalMemory(address, size));
    }

    Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
        R_RETURN(m_page_table.MapPhysicalMemoryUnsafe(address, size));
    }

    Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
        R_RETURN(m_page_table.UnmapPhysicalMemoryUnsafe(address, size));
    }

    Result UnmapProcessMemory(KProcessAddress dst_address, size_t size,
            KProcessPageTable& src_page_table, KProcessAddress src_address) {
        R_RETURN(m_page_table.UnmapProcessMemory(dst_address, size, src_page_table.m_page_table,
            src_address));
    }

    bool GetPhysicalAddress(KPhysicalAddress* out, KProcessAddress address) {
        return m_page_table.GetPhysicalAddress(out, address);
    }

    bool Contains(KProcessAddress addr, size_t size) const {
        return m_page_table.Contains(addr, size);
    }

    bool IsInAliasRegion(KProcessAddress addr, size_t size) const {
        return m_page_table.IsInAliasRegion(addr, size);
    }
    bool IsInHeapRegion(KProcessAddress addr, size_t size) const {
        return m_page_table.IsInHeapRegion(addr, size);
    }
    bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const {
        return m_page_table.IsInUnsafeAliasRegion(addr, size);
    }

    bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
        return m_page_table.CanContain(addr, size, state);
    }

    KProcessAddress GetAddressSpaceStart() const {
        return m_page_table.GetAddressSpaceStart();
    }
    KProcessAddress GetHeapRegionStart() const {
        return m_page_table.GetHeapRegionStart();
    }
    KProcessAddress GetAliasRegionStart() const {
        return m_page_table.GetAliasRegionStart();
    }
    KProcessAddress GetStackRegionStart() const {
        return m_page_table.GetStackRegionStart();
    }
    KProcessAddress GetKernelMapRegionStart() const {
        return m_page_table.GetKernelMapRegionStart();
    }
    KProcessAddress GetCodeRegionStart() const {
        return m_page_table.GetCodeRegionStart();
    }
    KProcessAddress GetAliasCodeRegionStart() const {
        return m_page_table.GetAliasCodeRegionStart();
    }

    size_t GetAddressSpaceSize() const {
        return m_page_table.GetAddressSpaceSize();
    }
    size_t GetHeapRegionSize() const {
        return m_page_table.GetHeapRegionSize();
    }
    size_t GetAliasRegionSize() const {
        return m_page_table.GetAliasRegionSize();
    }
    size_t GetStackRegionSize() const {
        return m_page_table.GetStackRegionSize();
    }
    size_t GetKernelMapRegionSize() const {
        return m_page_table.GetKernelMapRegionSize();
    }
    size_t GetCodeRegionSize() const {
        return m_page_table.GetCodeRegionSize();
    }
    size_t GetAliasCodeRegionSize() const {
        return m_page_table.GetAliasCodeRegionSize();
    }

    size_t GetNormalMemorySize() const {
        return m_page_table.GetNormalMemorySize();
    }

    size_t GetCodeSize() const {
        return m_page_table.GetCodeSize();
    }
    size_t GetCodeDataSize() const {
        return m_page_table.GetCodeDataSize();
    }

    size_t GetAliasCodeSize() const {
        return m_page_table.GetAliasCodeSize();
    }
    size_t GetAliasCodeDataSize() const {
        return m_page_table.GetAliasCodeDataSize();
    }

    u32 GetAllocateOption() const {
        return m_page_table.GetAllocateOption();
    }

    u32 GetAddressSpaceWidth() const {
        return m_page_table.GetAddressSpaceWidth();
    }

    KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress address) {
        return m_page_table.GetHeapPhysicalAddress(address);
    }

    u8* GetHeapVirtualPointer(KPhysicalAddress address) {
        return m_page_table.GetHeapVirtualPointer(address);
    }

    KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress address) {
        return m_page_table.GetHeapVirtualAddress(address);
    }

    KBlockInfoManager* GetBlockInfoManager() {
        return m_page_table.GetBlockInfoManager();
    }

    KPageTable& GetBasePageTable() {
        return m_page_table;
    }

    const KPageTable& GetBasePageTable() const {
        return m_page_table;
    }
};

} // namespace Kernel
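KProcessPageTable is a thin forwarding facade: KProcess owns one, and every call is delegated to the wrapped KPageTable. A rough usage sketch of going through the facade from process code follows; GrowHeapTo is a made-up helper, and the snippet assumes the kernel headers introduced in this change are available.

#include <memory>

#include "core/hle/kernel/k_process.h"

namespace Kernel {

// Hypothetical helper: grow the process heap and sanity-check the result.
Result GrowHeapTo(KProcess& process, size_t new_size) {
    KProcessAddress heap_address{};
    // KProcess::GetPageTable() now returns the KProcessPageTable facade; SetHeapSize()
    // is forwarded to KPageTable::SetHeapSize() by the wrapper shown above.
    R_TRY(process.GetPageTable().SetHeapSize(std::addressof(heap_address), new_size));

    // Region queries go through the same facade.
    ASSERT(process.GetPageTable().IsInHeapRegion(heap_address, new_size));
    R_SUCCEED();
}

} // namespace Kernel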
@@ -383,7 +383,7 @@ Result KServerSession::SendReply(bool is_hle) {
    if (event != nullptr) {
        // // Get the client process/page table.
        // KProcess *client_process = client_thread->GetOwnerProcess();
-       // KPageTable *client_page_table = std::addressof(client_process->PageTable());
+       // KProcessPageTable *client_page_table = std::addressof(client_process->PageTable());

        // // If we need to, reply with an async error.
        // if (R_FAILED(client_result)) {
@@ -40,7 +40,7 @@ Result KSecureSystemResource::Initialize(size_t size, KResourceLimit* resource_l

    // Get resource pointer.
    KPhysicalAddress resource_paddr =
-       KPageTable::GetHeapPhysicalAddress(m_kernel.MemoryLayout(), m_resource_address);
+       KPageTable::GetHeapPhysicalAddress(m_kernel, m_resource_address);
    auto* resource =
        m_kernel.System().DeviceMemory().GetPointer<KPageTableManager::RefCount>(resource_paddr);

@@ -37,8 +37,8 @@ Result KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {

Result KThreadLocalPage::Finalize() {
    // Get the physical address of the page.
-   const KPhysicalAddress phys_addr = m_owner->GetPageTable().GetPhysicalAddr(m_virt_addr);
-   ASSERT(phys_addr);
+   KPhysicalAddress phys_addr{};
+   ASSERT(m_owner->GetPageTable().GetPhysicalAddress(std::addressof(phys_addr), m_virt_addr));

    // Unmap the page.
    R_TRY(m_owner->GetPageTable().UnmapPages(this->GetAddress(), 1, KMemoryState::ThreadLocal));
@@ -1,389 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <bit>
-
-#include "common/bit_util.h"
-#include "common/logging/log.h"
-#include "core/hle/kernel/k_handle_table.h"
-#include "core/hle/kernel/k_page_table.h"
-#include "core/hle/kernel/process_capability.h"
-#include "core/hle/kernel/svc_results.h"
-
-namespace Kernel {
-namespace {
-
-// clang-format off
-
-// Shift offsets for kernel capability types.
-enum : u32 {
-CapabilityOffset_PriorityAndCoreNum = 3,
-CapabilityOffset_Syscall = 4,
-CapabilityOffset_MapPhysical = 6,
-CapabilityOffset_MapIO = 7,
-CapabilityOffset_MapRegion = 10,
-CapabilityOffset_Interrupt = 11,
-CapabilityOffset_ProgramType = 13,
-CapabilityOffset_KernelVersion = 14,
-CapabilityOffset_HandleTableSize = 15,
-CapabilityOffset_Debug = 16,
-};
-
-// Combined mask of all parameters that may be initialized only once.
-constexpr u32 InitializeOnceMask = (1U << CapabilityOffset_PriorityAndCoreNum) |
-(1U << CapabilityOffset_ProgramType) |
-(1U << CapabilityOffset_KernelVersion) |
-(1U << CapabilityOffset_HandleTableSize) |
-(1U << CapabilityOffset_Debug);
-
-// Packed kernel version indicating 10.4.0
-constexpr u32 PackedKernelVersion = 0x520000;
-
-// Indicates possible types of capabilities that can be specified.
-enum class CapabilityType : u32 {
-Unset = 0U,
-PriorityAndCoreNum = (1U << CapabilityOffset_PriorityAndCoreNum) - 1,
-Syscall = (1U << CapabilityOffset_Syscall) - 1,
-MapPhysical = (1U << CapabilityOffset_MapPhysical) - 1,
-MapIO = (1U << CapabilityOffset_MapIO) - 1,
-MapRegion = (1U << CapabilityOffset_MapRegion) - 1,
-Interrupt = (1U << CapabilityOffset_Interrupt) - 1,
-ProgramType = (1U << CapabilityOffset_ProgramType) - 1,
-KernelVersion = (1U << CapabilityOffset_KernelVersion) - 1,
-HandleTableSize = (1U << CapabilityOffset_HandleTableSize) - 1,
-Debug = (1U << CapabilityOffset_Debug) - 1,
-Ignorable = 0xFFFFFFFFU,
-};
-
-// clang-format on
-
-constexpr CapabilityType GetCapabilityType(u32 value) {
-return static_cast<CapabilityType>((~value & (value + 1)) - 1);
-}
-
-u32 GetFlagBitOffset(CapabilityType type) {
-const auto value = static_cast<u32>(type);
-return static_cast<u32>(Common::BitSize<u32>() - static_cast<u32>(std::countl_zero(value)));
-}
-
-} // Anonymous namespace
-
-Result ProcessCapabilities::InitializeForKernelProcess(const u32* capabilities,
-std::size_t num_capabilities,
-KPageTable& page_table) {
-Clear();
-
-// Allow all cores and priorities.
-core_mask = 0xF;
-priority_mask = 0xFFFFFFFFFFFFFFFF;
-kernel_version = PackedKernelVersion;
-
-return ParseCapabilities(capabilities, num_capabilities, page_table);
-}
-
-Result ProcessCapabilities::InitializeForUserProcess(const u32* capabilities,
-std::size_t num_capabilities,
-KPageTable& page_table) {
-Clear();
-
-return ParseCapabilities(capabilities, num_capabilities, page_table);
-}
-
-void ProcessCapabilities::InitializeForMetadatalessProcess() {
-// Allow all cores and priorities
-core_mask = 0xF;
-priority_mask = 0xFFFFFFFFFFFFFFFF;
-kernel_version = PackedKernelVersion;
-
-// Allow all system calls and interrupts.
-svc_capabilities.set();
-interrupt_capabilities.set();
-
-// Allow using the maximum possible amount of handles
-handle_table_size = static_cast<s32>(KHandleTable::MaxTableSize);
-
-// Allow all debugging capabilities.
-is_debuggable = true;
-can_force_debug = true;
-}
-
-Result ProcessCapabilities::ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
-KPageTable& page_table) {
-u32 set_flags = 0;
-u32 set_svc_bits = 0;
-
-for (std::size_t i = 0; i < num_capabilities; ++i) {
-const u32 descriptor = capabilities[i];
-const auto type = GetCapabilityType(descriptor);
-
-if (type == CapabilityType::MapPhysical) {
-i++;
-
-// The MapPhysical type uses two descriptor flags for its parameters.
-// If there's only one, then there's a problem.
-if (i >= num_capabilities) {
-LOG_ERROR(Kernel, "Invalid combination! i={}", i);
-return ResultInvalidCombination;
-}
-
-const auto size_flags = capabilities[i];
-if (GetCapabilityType(size_flags) != CapabilityType::MapPhysical) {
-LOG_ERROR(Kernel, "Invalid capability type! size_flags={}", size_flags);
-return ResultInvalidCombination;
-}
-
-const auto result = HandleMapPhysicalFlags(descriptor, size_flags, page_table);
-if (result.IsError()) {
-LOG_ERROR(Kernel, "Failed to map physical flags! descriptor={}, size_flags={}",
-descriptor, size_flags);
-return result;
-}
-} else {
-const auto result =
-ParseSingleFlagCapability(set_flags, set_svc_bits, descriptor, page_table);
-if (result.IsError()) {
-LOG_ERROR(
-Kernel,
-"Failed to parse capability flag! set_flags={}, set_svc_bits={}, descriptor={}",
-set_flags, set_svc_bits, descriptor);
-return result;
-}
-}
-}
-
-return ResultSuccess;
-}
-
-Result ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
-KPageTable& page_table) {
-const auto type = GetCapabilityType(flag);
-
-if (type == CapabilityType::Unset) {
-return ResultInvalidArgument;
-}
-
-// Bail early on ignorable entries, as one would expect,
-// ignorable descriptors can be ignored.
-if (type == CapabilityType::Ignorable) {
-return ResultSuccess;
-}
-
-// Ensure that the give flag hasn't already been initialized before.
-// If it has been, then bail.
-const u32 flag_length = GetFlagBitOffset(type);
-const u32 set_flag = 1U << flag_length;
-if ((set_flag & set_flags & InitializeOnceMask) != 0) {
-LOG_ERROR(Kernel,
-"Attempted to initialize flags that may only be initialized once. set_flags={}",
-set_flags);
-return ResultInvalidCombination;
-}
-set_flags |= set_flag;
-
-switch (type) {
-case CapabilityType::PriorityAndCoreNum:
-return HandlePriorityCoreNumFlags(flag);
-case CapabilityType::Syscall:
-return HandleSyscallFlags(set_svc_bits, flag);
-case CapabilityType::MapIO:
-return HandleMapIOFlags(flag, page_table);
-case CapabilityType::MapRegion:
-return HandleMapRegionFlags(flag, page_table);
-case CapabilityType::Interrupt:
-return HandleInterruptFlags(flag);
-case CapabilityType::ProgramType:
-return HandleProgramTypeFlags(flag);
-case CapabilityType::KernelVersion:
-return HandleKernelVersionFlags(flag);
-case CapabilityType::HandleTableSize:
-return HandleHandleTableFlags(flag);
-case CapabilityType::Debug:
-return HandleDebugFlags(flag);
-default:
-break;
-}
-
-LOG_ERROR(Kernel, "Invalid capability type! type={}", type);
-return ResultInvalidArgument;
-}
-
-void ProcessCapabilities::Clear() {
-svc_capabilities.reset();
-interrupt_capabilities.reset();
-
-core_mask = 0;
-priority_mask = 0;
-
-handle_table_size = 0;
-kernel_version = 0;
-
-program_type = ProgramType::SysModule;
-
-is_debuggable = false;
-can_force_debug = false;
-}
-
-Result ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
-if (priority_mask != 0 || core_mask != 0) {
-LOG_ERROR(Kernel, "Core or priority mask are not zero! priority_mask={}, core_mask={}",
-priority_mask, core_mask);
-return ResultInvalidArgument;
-}
-
-const u32 core_num_min = (flags >> 16) & 0xFF;
-const u32 core_num_max = (flags >> 24) & 0xFF;
-if (core_num_min > core_num_max) {
-LOG_ERROR(Kernel, "Core min is greater than core max! core_num_min={}, core_num_max={}",
-core_num_min, core_num_max);
-return ResultInvalidCombination;
-}
-
-const u32 priority_min = (flags >> 10) & 0x3F;
-const u32 priority_max = (flags >> 4) & 0x3F;
-if (priority_min > priority_max) {
-LOG_ERROR(Kernel,
-"Priority min is greater than priority max! priority_min={}, priority_max={}",
-core_num_min, priority_max);
-return ResultInvalidCombination;
-}
-
-// The switch only has 4 usable cores.
-if (core_num_max >= 4) {
-LOG_ERROR(Kernel, "Invalid max cores specified! core_num_max={}", core_num_max);
-return ResultInvalidCoreId;
-}
-
-const auto make_mask = [](u64 min, u64 max) {
-const u64 range = max - min + 1;
-const u64 mask = (1ULL << range) - 1;
-
-return mask << min;
-};
-
-core_mask = make_mask(core_num_min, core_num_max);
-priority_mask = make_mask(priority_min, priority_max);
-return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags) {
-const u32 index = flags >> 29;
-const u32 svc_bit = 1U << index;
-
-// If we've already set this svc before, bail.
-if ((set_svc_bits & svc_bit) != 0) {
-return ResultInvalidCombination;
-}
-set_svc_bits |= svc_bit;
-
-const u32 svc_mask = (flags >> 5) & 0xFFFFFF;
-for (u32 i = 0; i < 24; ++i) {
-const u32 svc_number = index * 24 + i;
-
-if ((svc_mask & (1U << i)) == 0) {
-continue;
-}
-
-svc_capabilities[svc_number] = true;
-}
-
-return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleMapPhysicalFlags(u32 flags, u32 size_flags,
-KPageTable& page_table) {
-// TODO(Lioncache): Implement once the memory manager can handle this.
-return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleMapIOFlags(u32 flags, KPageTable& page_table) {
-// TODO(Lioncache): Implement once the memory manager can handle this.
-return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleMapRegionFlags(u32 flags, KPageTable& page_table) {
-// TODO(Lioncache): Implement once the memory manager can handle this.
-return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleInterruptFlags(u32 flags) {
-constexpr u32 interrupt_ignore_value = 0x3FF;
-const u32 interrupt0 = (flags >> 12) & 0x3FF;
-const u32 interrupt1 = (flags >> 22) & 0x3FF;
-
-for (u32 interrupt : {interrupt0, interrupt1}) {
-if (interrupt == interrupt_ignore_value) {
-continue;
-}
-
-// NOTE:
-// This should be checking a generic interrupt controller value
-// as part of the calculation, however, given we don't currently
-// emulate that, it's sufficient to mark every interrupt as defined.
-
-if (interrupt >= interrupt_capabilities.size()) {
-LOG_ERROR(Kernel, "Process interrupt capability is out of range! svc_number={}",
-interrupt);
-return ResultOutOfRange;
-}
-
-interrupt_capabilities[interrupt] = true;
-}
-
-return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
-const u32 reserved = flags >> 17;
-if (reserved != 0) {
-LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
-return ResultReservedUsed;
-}
-
-program_type = static_cast<ProgramType>((flags >> 14) & 0b111);
-return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
-// Yes, the internal member variable is checked in the actual kernel here.
-// This might look odd for options that are only allowed to be initialized
-// just once, however the kernel has a separate initialization function for
-// kernel processes and userland processes. The kernel variant sets this
-// member variable ahead of time.
-
-const u32 major_version = kernel_version >> 19;
-
-if (major_version != 0 || flags < 0x80000) {
-LOG_ERROR(Kernel,
-"Kernel version is non zero or flags are too small! major_version={}, flags={}",
-major_version, flags);
-return ResultInvalidArgument;
-}
-
-kernel_version = flags;
-return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
-const u32 reserved = flags >> 26;
-if (reserved != 0) {
-LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
-return ResultReservedUsed;
-}
-
-handle_table_size = static_cast<s32>((flags >> 16) & 0x3FF);
-return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleDebugFlags(u32 flags) {
-const u32 reserved = flags >> 19;
-if (reserved != 0) {
-LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
-return ResultReservedUsed;
-}
-
-is_debuggable = (flags & 0x20000) != 0;
-can_force_debug = (flags & 0x40000) != 0;
-return ResultSuccess;
-}
-
-} // namespace Kernel
@@ -1,266 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#pragma once
-
-#include <bitset>
-
-#include "common/common_types.h"
-
-union Result;
-
-namespace Kernel {
-
-class KPageTable;
-
-/// The possible types of programs that may be indicated
-/// by the program type capability descriptor.
-enum class ProgramType {
-SysModule,
-Application,
-Applet,
-};
-
-/// Handles kernel capability descriptors that are provided by
-/// application metadata. These descriptors provide information
-/// that alters certain parameters for kernel process instance
-/// that will run said application (or applet).
-///
-/// Capabilities are a sequence of flag descriptors, that indicate various
-/// configurations and constraints for a particular process.
-///
-/// Flag types are indicated by a sequence of set low bits. E.g. the
-/// types are indicated with the low bits as follows (where x indicates "don't care"):
-///
-/// - Priority and core mask : 0bxxxxxxxxxxxx0111
-/// - Allowed service call mask: 0bxxxxxxxxxxx01111
-/// - Map physical memory : 0bxxxxxxxxx0111111
-/// - Map IO memory : 0bxxxxxxxx01111111
-/// - Interrupts : 0bxxxx011111111111
-/// - Application type : 0bxx01111111111111
-/// - Kernel version : 0bx011111111111111
-/// - Handle table size : 0b0111111111111111
-/// - Debugger flags : 0b1111111111111111
-///
-/// These are essentially a bit offset subtracted by 1 to create a mask.
-/// e.g. The first entry in the above list is simply bit 3 (value 8 -> 0b1000)
-/// subtracted by one (7 -> 0b0111)
-///
-/// An example of a bit layout (using the map physical layout):
-/// <example>
-/// The MapPhysical type indicates a sequence entry pair of:
-///
-/// [initial, memory_flags], where:
-///
-/// initial:
-/// bits:
-/// 7-24: Starting page to map memory at.
-/// 25 : Indicates if the memory should be mapped as read only.
-///
-/// memory_flags:
-/// bits:
-/// 7-20 : Number of pages to map
-/// 21-25: Seems to be reserved (still checked against though)
-/// 26 : Whether or not the memory being mapped is IO memory, or physical memory
-/// </example>
-///
-class ProcessCapabilities {
-public:
-using InterruptCapabilities = std::bitset<1024>;
-using SyscallCapabilities = std::bitset<192>;
-
-ProcessCapabilities() = default;
-ProcessCapabilities(const ProcessCapabilities&) = delete;
-ProcessCapabilities(ProcessCapabilities&&) = default;
-
-ProcessCapabilities& operator=(const ProcessCapabilities&) = delete;
-ProcessCapabilities& operator=(ProcessCapabilities&&) = default;
-
-/// Initializes this process capabilities instance for a kernel process.
-///
-/// @param capabilities The capabilities to parse
-/// @param num_capabilities The number of capabilities to parse.
-/// @param page_table The memory manager to use for handling any mapping-related
-/// operations (such as mapping IO memory, etc).
-///
-/// @returns ResultSuccess if this capabilities instance was able to be initialized,
-/// otherwise, an error code upon failure.
-///
-Result InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities,
-KPageTable& page_table);
-
-/// Initializes this process capabilities instance for a userland process.
-///
-/// @param capabilities The capabilities to parse.
-/// @param num_capabilities The total number of capabilities to parse.
-/// @param page_table The memory manager to use for handling any mapping-related
-/// operations (such as mapping IO memory, etc).
-///
-/// @returns ResultSuccess if this capabilities instance was able to be initialized,
-/// otherwise, an error code upon failure.
-///
-Result InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities,
-KPageTable& page_table);
-
-/// Initializes this process capabilities instance for a process that does not
-/// have any metadata to parse.
-///
-/// This is necessary, as we allow running raw executables, and the internal
-/// kernel process capabilities also determine what CPU cores the process is
-/// allowed to run on, and what priorities are allowed for threads. It also
-/// determines the max handle table size, what the program type is, whether or
-/// not the process can be debugged, or whether it's possible for a process to
-/// forcibly debug another process.
-///
-/// Given the above, this essentially enables all capabilities across the board
-/// for the process. It allows the process to:
-///
-/// - Run on any core
-/// - Use any thread priority
-/// - Use the maximum amount of handles a process is allowed to.
-/// - Be debuggable
-/// - Forcibly debug other processes.
-///
-/// Note that this is not a behavior that the kernel allows a process to do via
-/// a single function like this. This is yuzu-specific behavior to handle
-/// executables with no capability descriptors whatsoever to derive behavior from.
-/// It being yuzu-specific is why this is also not the default behavior and not
-/// done by default in the constructor.
-///
-void InitializeForMetadatalessProcess();
-
-/// Gets the allowable core mask
-u64 GetCoreMask() const {
-return core_mask;
-}
-
-/// Gets the allowable priority mask
-u64 GetPriorityMask() const {
-return priority_mask;
-}
-
-/// Gets the SVC access permission bits
-const SyscallCapabilities& GetServiceCapabilities() const {
-return svc_capabilities;
-}
-
-/// Gets the valid interrupt bits.
-const InterruptCapabilities& GetInterruptCapabilities() const {
-return interrupt_capabilities;
-}
-
-/// Gets the program type for this process.
-ProgramType GetProgramType() const {
-return program_type;
-}
-
-/// Gets the number of total allowable handles for the process' handle table.
-s32 GetHandleTableSize() const {
-return handle_table_size;
-}
-
-/// Gets the kernel version value.
-u32 GetKernelVersion() const {
-return kernel_version;
-}
-
-/// Whether or not this process can be debugged.
-bool IsDebuggable() const {
-return is_debuggable;
-}
-
-/// Whether or not this process can forcibly debug another
-/// process, even if that process is not considered debuggable.
-bool CanForceDebug() const {
-return can_force_debug;
-}
-
-private:
-/// Attempts to parse a given sequence of capability descriptors.
-///
-/// @param capabilities The sequence of capability descriptors to parse.
-/// @param num_capabilities The number of descriptors within the given sequence.
-/// @param page_table The memory manager that will perform any memory
-/// mapping if necessary.
-///
-/// @return ResultSuccess if no errors occur, otherwise an error code.
-///
-Result ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
-KPageTable& page_table);
-
-/// Attempts to parse a capability descriptor that is only represented by a
-/// single flag set.
-///
-/// @param set_flags Running set of flags that are used to catch
-/// flags being initialized more than once when they shouldn't be.
-/// @param set_svc_bits Running set of bits representing the allowed supervisor calls mask.
-/// @param flag The flag to attempt to parse.
-/// @param page_table The memory manager that will perform any memory
-/// mapping if necessary.
-///
-/// @return ResultSuccess if no errors occurred, otherwise an error code.
-///
-Result ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
-KPageTable& page_table);
-
-/// Clears the internal state of this process capability instance. Necessary,
-/// to have a sane starting point due to us allowing running executables without
-/// configuration metadata. We assume a process is not going to have metadata,
-/// and if it turns out that the process does, in fact, have metadata, then
-/// we attempt to parse it. Thus, we need this to reset data members back to
-/// a good state.
-///
-/// DO NOT ever make this a public member function. This isn't an invariant
-/// anything external should depend upon (and if anything comes to rely on it,
-/// you should immediately be questioning the design of that thing, not this
-/// class. If the kernel itself can run without depending on behavior like that,
-/// then so can yuzu).
-///
-void Clear();
-
-/// Handles flags related to the priority and core number capability flags.
-Result HandlePriorityCoreNumFlags(u32 flags);
-
-/// Handles flags related to determining the allowable SVC mask.
-Result HandleSyscallFlags(u32& set_svc_bits, u32 flags);
-
-/// Handles flags related to mapping physical memory pages.
-Result HandleMapPhysicalFlags(u32 flags, u32 size_flags, KPageTable& page_table);
-
-/// Handles flags related to mapping IO pages.
-Result HandleMapIOFlags(u32 flags, KPageTable& page_table);
-
-/// Handles flags related to mapping physical memory regions.
-Result HandleMapRegionFlags(u32 flags, KPageTable& page_table);
-
-/// Handles flags related to the interrupt capability flags.
-Result HandleInterruptFlags(u32 flags);
-
-/// Handles flags related to the program type.
-Result HandleProgramTypeFlags(u32 flags);
-
-/// Handles flags related to the handle table size.
-Result HandleHandleTableFlags(u32 flags);
-
-/// Handles flags related to the kernel version capability flags.
-Result HandleKernelVersionFlags(u32 flags);
-
-/// Handles flags related to debug-specific capabilities.
-Result HandleDebugFlags(u32 flags);
-
-SyscallCapabilities svc_capabilities;
-InterruptCapabilities interrupt_capabilities;
-
-u64 core_mask = 0;
-u64 priority_mask = 0;
-
-s32 handle_table_size = 0;
-u32 kernel_version = 0;
-
-ProgramType program_type = ProgramType::SysModule;
-
-bool is_debuggable = false;
-bool can_force_debug = false;
-};
-
-} // namespace Kernel
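Side note on the capability encoding documented above (an illustrative sketch, not part of the diff): the type of a descriptor is selected by its run of trailing set bits, and the expression below mirrors the `GetCapabilityType` helper from the deleted process capability source; the constants used in the asserts are made-up example descriptors.

```cpp
#include <cstdint>

// Mirrors the deleted helper: isolate the trailing run of 1-bits, which names the
// capability type regardless of the payload stored in the higher bits.
constexpr std::uint32_t GetCapabilityTypeMask(std::uint32_t value) {
    return (~value & (value + 1)) - 1;
}

// 0b...0111 -> PriorityAndCoreNum (offset 3); the upper bits carry the payload.
static_assert(GetCapabilityTypeMask(0b1010'0111) == 0b0111);
// 0b...01111 -> Syscall (offset 4).
static_assert(GetCapabilityTypeMask(0b1101'1110'1111) == 0b1111);
```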
@@ -29,7 +29,8 @@ constexpr bool IsValidAddressRange(u64 address, u64 size) {
 // Helper function that performs the common sanity checks for svcMapMemory
 // and svcUnmapMemory. This is doable, as both functions perform their sanitizing
 // in the same order.
-Result MapUnmapMemorySanityChecks(const KPageTable& manager, u64 dst_addr, u64 src_addr, u64 size) {
+Result MapUnmapMemorySanityChecks(const KProcessPageTable& manager, u64 dst_addr, u64 src_addr,
+u64 size) {
 if (!Common::Is4KBAligned(dst_addr)) {
 LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
 R_THROW(ResultInvalidAddress);
@@ -123,7 +124,8 @@ Result SetMemoryAttribute(Core::System& system, u64 address, u64 size, u32 mask,
 R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);

 // Set the memory attribute.
-R_RETURN(page_table.SetMemoryAttribute(address, size, mask, attr));
+R_RETURN(page_table.SetMemoryAttribute(address, size, static_cast<KMemoryAttribute>(mask),
+static_cast<KMemoryAttribute>(attr)));
 }

 /// Maps a memory range into a different range.
@@ -16,7 +16,14 @@ Result SetHeapSize(Core::System& system, u64* out_address, u64 size) {
 R_UNLESS(size < MainMemorySizeMax, ResultInvalidSize);

 // Set the heap size.
-R_RETURN(GetCurrentProcess(system.Kernel()).GetPageTable().SetHeapSize(out_address, size));
+KProcessAddress address{};
+R_TRY(GetCurrentProcess(system.Kernel())
+.GetPageTable()
+.SetHeapSize(std::addressof(address), size));
+
+// We succeeded.
+*out_address = GetInteger(address);
+R_SUCCEED();
 }

 /// Maps memory at a desired address
@@ -247,8 +247,7 @@ Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 d
 R_THROW(ResultInvalidCurrentMemory);
 }

-R_RETURN(page_table.UnmapCodeMemory(dst_address, src_address, size,
-KPageTable::ICacheInvalidationStrategy::InvalidateAll));
+R_RETURN(page_table.UnmapCodeMemory(dst_address, src_address, size));
 }

 Result SetProcessMemoryPermission64(Core::System& system, Handle process_handle, uint64_t address,
@@ -31,12 +31,12 @@ Result QueryProcessMemory(Core::System& system, uint64_t out_memory_info, PageIn
 }

 auto& current_memory{GetCurrentMemory(system.Kernel())};
-const auto memory_info{process->GetPageTable().QueryInfo(address).GetSvcMemoryInfo()};

-current_memory.WriteBlock(out_memory_info, std::addressof(memory_info), sizeof(memory_info));
+KMemoryInfo mem_info;
+R_TRY(process->GetPageTable().QueryInfo(std::addressof(mem_info), out_page_info, address));

-//! This is supposed to be part of the QueryInfo call.
-*out_page_info = {};
+const auto svc_mem_info = mem_info.GetSvcMemoryInfo();
+current_memory.WriteBlock(out_memory_info, std::addressof(svc_mem_info), sizeof(svc_mem_info));

 R_SUCCEED();
 }
@@ -407,3 +407,34 @@ constexpr inline Result __TmpCurrentResultReference = ResultSuccess;

 /// Evaluates a boolean expression, and succeeds if that expression is true.
 #define R_SUCCEED_IF(expr) R_UNLESS(!(expr), ResultSuccess)
+
+#define R_TRY_CATCH(res_expr) \
+{ \
+const auto R_CURRENT_RESULT = (res_expr); \
+if (R_FAILED(R_CURRENT_RESULT)) { \
+if (false)
+
+#define R_END_TRY_CATCH \
+else if (R_FAILED(R_CURRENT_RESULT)) { \
+R_THROW(R_CURRENT_RESULT); \
+} \
+} \
+}
+
+#define R_CATCH_ALL() \
+} \
+else if (R_FAILED(R_CURRENT_RESULT)) { \
+if (true)
+
+#define R_CATCH(res_expr) \
+} \
+else if ((res_expr) == (R_CURRENT_RESULT)) { \
+if (true)
+
+#define R_CONVERT(catch_type, convert_type) \
+R_CATCH(catch_type) { R_THROW(static_cast<Result>(convert_type)); }
+
+#define R_CONVERT_ALL(convert_type) \
+R_CATCH_ALL() { R_THROW(static_cast<Result>(convert_type)); }
+
+#define R_ASSERT(res_expr) ASSERT(R_SUCCEEDED(res_expr))
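For orientation, the new result macros above are presumably used in a catch-style block like the following sketch (not part of the diff): `DoSomething` is a hypothetical callee, and `ResultInvalidAddress` merely stands in for whichever result code a caller wants to intercept; everything else comes from the macros and helpers already defined in this header.

```cpp
// Sketch: the macros expand into an if/else-if chain keyed on R_CURRENT_RESULT;
// failures not matched by an R_CATCH/R_CATCH_ALL are re-thrown by R_END_TRY_CATCH.
Result Example(Core::System& system) {
    R_TRY_CATCH(DoSomething(system)) {
        R_CATCH(ResultInvalidAddress) {
            // Recover from this specific failure and fall through to success.
        }
        R_CATCH_ALL() {
            // Any other failure lands here; without this handler it would be re-thrown.
        }
    } R_END_TRY_CATCH;

    R_SUCCEED();
}
```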
@@ -3,11 +3,13 @@

 #include <algorithm>
 #include <array>

 #include "common/common_types.h"
 #include "common/fs/file.h"
 #include "common/fs/path_util.h"
 #include "common/logging/log.h"
 #include "common/polyfill_ranges.h"
+#include "common/stb.h"
 #include "common/string_util.h"
 #include "common/swap.h"
 #include "core/constants.h"
@@ -38,9 +40,36 @@ static std::filesystem::path GetImagePath(const Common::UUID& uuid) {
 fmt::format("system/save/8000000000000010/su/avators/{}.jpg", uuid.FormattedString());
 }

-static constexpr u32 SanitizeJPEGSize(std::size_t size) {
+static void JPGToMemory(void* context, void* data, int len) {
+std::vector<u8>* jpg_image = static_cast<std::vector<u8>*>(context);
+unsigned char* jpg = static_cast<unsigned char*>(data);
+jpg_image->insert(jpg_image->end(), jpg, jpg + len);
+}
+
+static void SanitizeJPEGImageSize(std::vector<u8>& image) {
 constexpr std::size_t max_jpeg_image_size = 0x20000;
-return static_cast<u32>(std::min(size, max_jpeg_image_size));
+constexpr int profile_dimensions = 256;
+int original_width, original_height, color_channels;
+
+const auto plain_image =
+stbi_load_from_memory(image.data(), static_cast<int>(image.size()), &original_width,
+&original_height, &color_channels, STBI_rgb);
+
+// Resize image to match 256*256
+if (original_width != profile_dimensions || original_height != profile_dimensions) {
+// Use vector instead of array to avoid overflowing the stack
+std::vector<u8> out_image(profile_dimensions * profile_dimensions * STBI_rgb);
+stbir_resize_uint8_srgb(plain_image, original_width, original_height, 0, out_image.data(),
+profile_dimensions, profile_dimensions, 0, STBI_rgb, 0,
+STBIR_FILTER_BOX);
+image.clear();
+if (!stbi_write_jpg_to_func(JPGToMemory, &image, profile_dimensions, profile_dimensions,
+STBI_rgb, out_image.data(), 0)) {
+LOG_ERROR(Service_ACC, "Failed to resize the user provided image.");
+}
+}
+
+image.resize(std::min(image.size(), max_jpeg_image_size));
 }

 class IManagerForSystemService final : public ServiceFramework<IManagerForSystemService> {
@@ -339,19 +368,20 @@ protected:
 LOG_WARNING(Service_ACC,
 "Failed to load user provided image! Falling back to built-in backup...");
 ctx.WriteBuffer(Core::Constants::ACCOUNT_BACKUP_JPEG);
-rb.Push(SanitizeJPEGSize(Core::Constants::ACCOUNT_BACKUP_JPEG.size()));
+rb.Push(static_cast<u32>(Core::Constants::ACCOUNT_BACKUP_JPEG.size()));
 return;
 }

-const u32 size = SanitizeJPEGSize(image.GetSize());
-std::vector<u8> buffer(size);
+std::vector<u8> buffer(image.GetSize());

 if (image.Read(buffer) != buffer.size()) {
 LOG_ERROR(Service_ACC, "Failed to read all the bytes in the user provided image.");
 }

+SanitizeJPEGImageSize(buffer);
+
 ctx.WriteBuffer(buffer);
-rb.Push<u32>(size);
+rb.Push(static_cast<u32>(buffer.size()));
 }

 void GetImageSize(HLERequestContext& ctx) {
@@ -365,10 +395,18 @@ protected:
 if (!image.IsOpen()) {
 LOG_WARNING(Service_ACC,
 "Failed to load user provided image! Falling back to built-in backup...");
-rb.Push(SanitizeJPEGSize(Core::Constants::ACCOUNT_BACKUP_JPEG.size()));
-} else {
-rb.Push(SanitizeJPEGSize(image.GetSize()));
+rb.Push(static_cast<u32>(Core::Constants::ACCOUNT_BACKUP_JPEG.size()));
+return;
 }

+std::vector<u8> buffer(image.GetSize());
+
+if (image.Read(buffer) != buffer.size()) {
+LOG_ERROR(Service_ACC, "Failed to read all the bytes in the user provided image.");
+}
+
+SanitizeJPEGImageSize(buffer);
+rb.Push(static_cast<u32>(buffer.size()));
 }

 void Store(HLERequestContext& ctx) {
@@ -330,8 +330,7 @@ void WebBrowser::ExtractOfflineRomFS() {
 LOG_DEBUG(Service_AM, "Extracting RomFS to {}",
 Common::FS::PathToUTF8String(offline_cache_dir));

-const auto extracted_romfs_dir =
-FileSys::ExtractRomFS(offline_romfs, FileSys::RomFSExtractionType::SingleDiscard);
+const auto extracted_romfs_dir = FileSys::ExtractRomFS(offline_romfs);

 const auto temp_dir = system.GetFilesystem()->CreateDirectory(
 Common::FS::PathToUTF8String(offline_cache_dir), FileSys::Mode::ReadWrite);
@@ -69,6 +69,30 @@ enum class AppletId : u32 {
 MyPage = 0x1A,
 };

+enum class AppletProgramId : u64 {
+QLaunch = 0x0100000000001000ull,
+Auth = 0x0100000000001001ull,
+Cabinet = 0x0100000000001002ull,
+Controller = 0x0100000000001003ull,
+DataErase = 0x0100000000001004ull,
+Error = 0x0100000000001005ull,
+NetConnect = 0x0100000000001006ull,
+ProfileSelect = 0x0100000000001007ull,
+SoftwareKeyboard = 0x0100000000001008ull,
+MiiEdit = 0x0100000000001009ull,
+Web = 0x010000000000100Aull,
+Shop = 0x010000000000100Bull,
+OverlayDisplay = 0x010000000000100Cull,
+PhotoViewer = 0x010000000000100Dull,
+Settings = 0x010000000000100Eull,
+OfflineWeb = 0x010000000000100Full,
+LoginShare = 0x0100000000001010ull,
+WebAuth = 0x0100000000001011ull,
+Starter = 0x0100000000001012ull,
+MyPage = 0x0100000000001013ull,
+MaxProgramId = 0x0100000000001FFFull,
+};
+
 enum class LibraryAppletMode : u32 {
 AllForeground = 0,
 Background = 1,
@@ -1108,9 +1108,9 @@ Result Controller_NPad::DisconnectNpad(Core::HID::NpadIdType npad_id) {
 shared_memory->sixaxis_dual_right_properties.raw = 0;
 shared_memory->sixaxis_left_properties.raw = 0;
 shared_memory->sixaxis_right_properties.raw = 0;
-shared_memory->battery_level_dual = 0;
-shared_memory->battery_level_left = 0;
-shared_memory->battery_level_right = 0;
+shared_memory->battery_level_dual = Core::HID::NpadBatteryLevel::Empty;
+shared_memory->battery_level_left = Core::HID::NpadBatteryLevel::Empty;
+shared_memory->battery_level_right = Core::HID::NpadBatteryLevel::Empty;
 shared_memory->fullkey_color = {
 .attribute = ColorAttribute::NoController,
 .fullkey = {},
@@ -1353,7 +1353,7 @@ void Hid::IsUnintendedHomeButtonInputProtectionEnabled(HLERequestContext& ctx) {
 void Hid::EnableUnintendedHomeButtonInputProtection(HLERequestContext& ctx) {
 IPC::RequestParser rp{ctx};
 struct Parameters {
-bool unintended_home_button_input_protection;
+bool is_enabled;
 INSERT_PADDING_BYTES_NOINIT(3);
 Core::HID::NpadIdType npad_id;
 u64 applet_resource_user_id;
@@ -1364,13 +1364,11 @@ void Hid::EnableUnintendedHomeButtonInputProtection(HLERequestContext& ctx) {

 auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
 const auto result = controller.SetUnintendedHomeButtonInputProtectionEnabled(
-parameters.unintended_home_button_input_protection, parameters.npad_id);
+parameters.is_enabled, parameters.npad_id);

-LOG_WARNING(Service_HID,
-"(STUBBED) called, unintended_home_button_input_protection={}, npad_id={},"
-"applet_resource_user_id={}",
-parameters.unintended_home_button_input_protection, parameters.npad_id,
-parameters.applet_resource_user_id);
+LOG_DEBUG(Service_HID,
+"(STUBBED) called, is_enabled={}, npad_id={}, applet_resource_user_id={}",
+parameters.is_enabled, parameters.npad_id, parameters.applet_resource_user_id);

 IPC::ResponseBuilder rb{ctx, 2};
 rb.Push(result);
@@ -32,15 +32,15 @@ struct Lifo {
 }

 std::size_t GetPreviousEntryIndex() const {
-return static_cast<size_t>((buffer_tail + total_buffer_count - 1) % total_buffer_count);
+return static_cast<size_t>((buffer_tail + max_buffer_size - 1) % max_buffer_size);
 }

 std::size_t GetNextEntryIndex() const {
-return static_cast<size_t>((buffer_tail + 1) % total_buffer_count);
+return static_cast<size_t>((buffer_tail + 1) % max_buffer_size);
 }

 void WriteNextEntry(const State& new_state) {
-if (buffer_count < total_buffer_count - 1) {
+if (buffer_count < static_cast<s64>(max_buffer_size) - 1) {
 buffer_count++;
 }
 buffer_tail = GetNextEntryIndex();
@@ -286,9 +286,14 @@ public:
 rb.Push(ResultSuccess);
 }

-bool ValidateRegionForMap(Kernel::KPageTable& page_table, VAddr start, std::size_t size) const {
+bool ValidateRegionForMap(Kernel::KProcessPageTable& page_table, VAddr start,
+std::size_t size) const {
 const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize};
-const auto start_info{page_table.QueryInfo(start - 1)};
+Kernel::KMemoryInfo start_info;
+Kernel::Svc::PageInfo page_info;
+R_ASSERT(
+page_table.QueryInfo(std::addressof(start_info), std::addressof(page_info), start - 1));
+
 if (start_info.GetState() != Kernel::KMemoryState::Free) {
 return {};
@@ -298,7 +303,9 @@ public:
 return {};
 }

-const auto end_info{page_table.QueryInfo(start + size)};
+Kernel::KMemoryInfo end_info;
+R_ASSERT(page_table.QueryInfo(std::addressof(end_info), std::addressof(page_info),
+start + size));
+
 if (end_info.GetState() != Kernel::KMemoryState::Free) {
 return {};
@@ -307,7 +314,7 @@ public:
 return (start + size + padding_size) <= (end_info.GetAddress() + end_info.GetSize());
 }

-Result GetAvailableMapRegion(Kernel::KPageTable& page_table, u64 size, VAddr& out_addr) {
+Result GetAvailableMapRegion(Kernel::KProcessPageTable& page_table, u64 size, VAddr& out_addr) {
 size = Common::AlignUp(size, Kernel::PageSize);
 size += page_table.GetNumGuardPages() * Kernel::PageSize * 4;

@@ -391,12 +398,8 @@ public:

 if (bss_size) {
 auto block_guard = detail::ScopeExit([&] {
-page_table.UnmapCodeMemory(
-addr + nro_size, bss_addr, bss_size,
-Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange);
-page_table.UnmapCodeMemory(
-addr, nro_addr, nro_size,
-Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange);
+page_table.UnmapCodeMemory(addr + nro_size, bss_addr, bss_size);
+page_table.UnmapCodeMemory(addr, nro_addr, nro_size);
 });

 const Result result{page_table.MapCodeMemory(addr + nro_size, bss_addr, bss_size)};
@@ -578,21 +581,17 @@ public:
 auto& page_table{system.ApplicationProcess()->GetPageTable()};

 if (info.bss_size != 0) {
-R_TRY(page_table.UnmapCodeMemory(
-info.nro_address + info.text_size + info.ro_size + info.data_size, info.bss_address,
-info.bss_size, Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange));
+R_TRY(page_table.UnmapCodeMemory(info.nro_address + info.text_size + info.ro_size +
+info.data_size,
+info.bss_address, info.bss_size));
 }

-R_TRY(page_table.UnmapCodeMemory(
-info.nro_address + info.text_size + info.ro_size,
-info.src_addr + info.text_size + info.ro_size, info.data_size,
-Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange));
-R_TRY(page_table.UnmapCodeMemory(
-info.nro_address + info.text_size, info.src_addr + info.text_size, info.ro_size,
-Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange));
-R_TRY(page_table.UnmapCodeMemory(
-info.nro_address, info.src_addr, info.text_size,
-Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange));
+R_TRY(page_table.UnmapCodeMemory(info.nro_address + info.text_size + info.ro_size,
+info.src_addr + info.text_size + info.ro_size,
+info.data_size));
+R_TRY(page_table.UnmapCodeMemory(info.nro_address + info.text_size,
+info.src_addr + info.text_size, info.ro_size));
+R_TRY(page_table.UnmapCodeMemory(info.nro_address, info.src_addr, info.text_size));
 return ResultSuccess;
 }

159
src/core/hle/service/nvdrv/devices/ioctl_serialization.h
Normal file
@@ -0,0 +1,159 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <span>
+#include <vector>
+
+#include "common/concepts.h"
+#include "core/hle/service/nvdrv/devices/nvdevice.h"
+
+namespace Service::Nvidia::Devices {
+
+struct IoctlOneArgTraits {
+template <typename T, typename R, typename A, typename... B>
+static A GetFirstArgImpl(R (T::*)(A, B...));
+};
+
+struct IoctlTwoArgTraits {
+template <typename T, typename R, typename A, typename B, typename... C>
+static A GetFirstArgImpl(R (T::*)(A, B, C...));
+
+template <typename T, typename R, typename A, typename B, typename... C>
+static B GetSecondArgImpl(R (T::*)(A, B, C...));
+};
+
+struct Null {};
+
+// clang-format off
+
+template <typename FixedArg, typename VarArg, typename InlInVarArg, typename InlOutVarArg, typename F>
+NvResult WrapGeneric(F&& callable, std::span<const u8> input, std::span<const u8> inline_input, std::span<u8> output, std::span<u8> inline_output) {
+constexpr bool HasFixedArg = !std::is_same_v<FixedArg, Null>;
+constexpr bool HasVarArg = !std::is_same_v<VarArg, Null>;
+constexpr bool HasInlInVarArg = !std::is_same_v<InlInVarArg, Null>;
+constexpr bool HasInlOutVarArg = !std::is_same_v<InlOutVarArg, Null>;
+
+// Declare the fixed-size input value.
+FixedArg fixed{};
+size_t var_offset = 0;
+
+if constexpr (HasFixedArg) {
+// Read the fixed-size input value.
+var_offset = std::min(sizeof(FixedArg), input.size());
+if (var_offset > 0) {
+std::memcpy(&fixed, input.data(), var_offset);
+}
+}
+
+// Read the variable-sized inputs.
+const size_t num_var_args = HasVarArg ? ((input.size() - var_offset) / sizeof(VarArg)) : 0;
+std::vector<VarArg> var_args(num_var_args);
+if constexpr (HasVarArg) {
+if (num_var_args > 0) {
+std::memcpy(var_args.data(), input.data() + var_offset, num_var_args * sizeof(VarArg));
+}
+}
+
+const size_t num_inl_in_var_args = HasInlInVarArg ? (inline_input.size() / sizeof(InlInVarArg)) : 0;
+std::vector<InlInVarArg> inl_in_var_args(num_inl_in_var_args);
+if constexpr (HasInlInVarArg) {
+if (num_inl_in_var_args > 0) {
+std::memcpy(inl_in_var_args.data(), inline_input.data(), num_inl_in_var_args * sizeof(InlInVarArg));
+}
+}
+
+// Construct inline output data.
+const size_t num_inl_out_var_args = HasInlOutVarArg ? (inline_output.size() / sizeof(InlOutVarArg)) : 0;
+std::vector<InlOutVarArg> inl_out_var_args(num_inl_out_var_args);
+
+// Perform the call.
+NvResult result = callable(fixed, var_args, inl_in_var_args, inl_out_var_args);
+
+// Copy outputs.
+if constexpr (HasFixedArg) {
+if (output.size() > 0) {
+std::memcpy(output.data(), &fixed, std::min(output.size(), sizeof(FixedArg)));
+}
+}
+
+if constexpr (HasVarArg) {
+if (num_var_args > 0 && output.size() > var_offset) {
+const size_t max_var_size = output.size() - var_offset;
+std::memcpy(output.data() + var_offset, var_args.data(), std::min(max_var_size, num_var_args * sizeof(VarArg)));
+}
+}
+
+// Copy inline outputs.
+if constexpr (HasInlOutVarArg) {
+if (num_inl_out_var_args > 0) {
+std::memcpy(inline_output.data(), inl_out_var_args.data(), num_inl_out_var_args * sizeof(InlOutVarArg));
+}
+}
+
+// We're done.
+return result;
+}
+
+template <typename Self, typename F, typename... Rest>
+NvResult WrapFixed(Self* self, F&& callable, std::span<const u8> input, std::span<u8> output, Rest&&... rest) {
+using FixedArg = typename std::remove_reference_t<decltype(IoctlOneArgTraits::GetFirstArgImpl(callable))>;
+
+const auto Callable = [&](auto& fixed, auto& var, auto& inl_in, auto& inl_out) -> NvResult {
+return (self->*callable)(fixed, std::forward<Rest>(rest)...);
+};
+
+return WrapGeneric<FixedArg, Null, Null, Null>(std::move(Callable), input, {}, output, {});
+}
+
+template <typename Self, typename F, typename... Rest>
+NvResult WrapFixedInlOut(Self* self, F&& callable, std::span<const u8> input, std::span<u8> output, std::span<u8> inline_output, Rest&&... rest) {
+using FixedArg = typename std::remove_reference_t<decltype(IoctlTwoArgTraits::GetFirstArgImpl(callable))>;
+using InlOutVarArg = typename std::remove_reference_t<decltype(IoctlTwoArgTraits::GetSecondArgImpl(callable))>::value_type;
+
+const auto Callable = [&](auto& fixed, auto& var, auto& inl_in, auto& inl_out) -> NvResult {
+return (self->*callable)(fixed, inl_out, std::forward<Rest>(rest)...);
+};
+
+return WrapGeneric<FixedArg, Null, Null, InlOutVarArg>(std::move(Callable), input, {}, output, inline_output);
+}
+
+template <typename Self, typename F, typename... Rest>
+NvResult WrapVariable(Self* self, F&& callable, std::span<const u8> input, std::span<u8> output, Rest&&... rest) {
+using VarArg = typename std::remove_reference_t<decltype(IoctlOneArgTraits::GetFirstArgImpl(callable))>::value_type;
+
+const auto Callable = [&](auto& fixed, auto& var, auto& inl_in, auto& inl_out) -> NvResult {
+return (self->*callable)(var, std::forward<Rest>(rest)...);
+};
+
+return WrapGeneric<Null, VarArg, Null, Null>(std::move(Callable), input, {}, output, {});
+}
+
+template <typename Self, typename F, typename... Rest>
+NvResult WrapFixedVariable(Self* self, F&& callable, std::span<const u8> input, std::span<u8> output, Rest&&... rest) {
+using FixedArg = typename std::remove_reference_t<decltype(IoctlTwoArgTraits::GetFirstArgImpl(callable))>;
+using VarArg = typename std::remove_reference_t<decltype(IoctlTwoArgTraits::GetSecondArgImpl(callable))>::value_type;
+
+const auto Callable = [&](auto& fixed, auto& var, auto& inl_in, auto& inl_out) -> NvResult {
+return (self->*callable)(fixed, var, std::forward<Rest>(rest)...);
+};
+
+return WrapGeneric<FixedArg, VarArg, Null, Null>(std::move(Callable), input, {}, output, {});
+}
+
+template <typename Self, typename F, typename... Rest>
+NvResult WrapFixedInlIn(Self* self, F&& callable, std::span<const u8> input, std::span<const u8> inline_input, std::span<u8> output, Rest&&... rest) {
+using FixedArg = typename std::remove_reference_t<decltype(IoctlTwoArgTraits::GetFirstArgImpl(callable))>;
+using InlInVarArg = typename std::remove_reference_t<decltype(IoctlTwoArgTraits::GetSecondArgImpl(callable))>::value_type;
+
+const auto Callable = [&](auto& fixed, auto& var, auto& inl_in, auto& inl_out) -> NvResult {
+return (self->*callable)(fixed, inl_in, std::forward<Rest>(rest)...);
+};
+
+return WrapGeneric<FixedArg, Null, InlInVarArg, Null>(std::move(Callable), input, inline_input, output, {});
+}
+
+// clang-format on
+
+} // namespace Service::Nvidia::Devices
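The wrappers above let ioctl handlers take typed arguments (a fixed-size struct, a variable-length list, and optional inline buffers) while `WrapGeneric` centralizes the raw `memcpy` in and out of the ioctl byte spans. A minimal sketch of the deduction trick, using hypothetical `Foo`/`IoctlFoo` names that are not part of the diff:

```cpp
#include <type_traits>

struct IoctlFoo {
    unsigned value;
};

struct Foo {
    int Handle(IoctlFoo& params); // handler now receives a typed struct reference
};

struct OneArgTraits {
    template <typename T, typename R, typename A, typename... B>
    static A GetFirstArgImpl(R (T::*)(A, B...));
};

// FixedArg resolves to IoctlFoo; WrapFixed uses the same pattern to know how many
// bytes to copy out of the raw input span before invoking the member function.
using FixedArg = std::remove_reference_t<decltype(OneArgTraits::GetFirstArgImpl(&Foo::Handle))>;
static_assert(std::is_same_v<FixedArg, IoctlFoo>);
```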
@@ -11,6 +11,7 @@
 #include "core/core.h"
 #include "core/hle/service/nvdrv/core/container.h"
 #include "core/hle/service/nvdrv/core/nvmap.h"
+#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
 #include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h"
 #include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
 #include "core/hle/service/nvdrv/nvdrv.h"
@@ -33,21 +34,21 @@ NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> i
 case 'A':
 switch (command.cmd) {
 case 0x1:
-return BindChannel(input, output);
+return WrapFixed(this, &nvhost_as_gpu::BindChannel, input, output);
 case 0x2:
-return AllocateSpace(input, output);
+return WrapFixed(this, &nvhost_as_gpu::AllocateSpace, input, output);
 case 0x3:
-return FreeSpace(input, output);
+return WrapFixed(this, &nvhost_as_gpu::FreeSpace, input, output);
 case 0x5:
-return UnmapBuffer(input, output);
+return WrapFixed(this, &nvhost_as_gpu::UnmapBuffer, input, output);
 case 0x6:
-return MapBufferEx(input, output);
+return WrapFixed(this, &nvhost_as_gpu::MapBufferEx, input, output);
 case 0x8:
-return GetVARegions(input, output);
+return WrapFixed(this, &nvhost_as_gpu::GetVARegions1, input, output);
 case 0x9:
-return AllocAsEx(input, output);
+return WrapFixed(this, &nvhost_as_gpu::AllocAsEx, input, output);
 case 0x14:
-return Remap(input, output);
+return WrapVariable(this, &nvhost_as_gpu::Remap, input, output);
 default:
 break;
 }
@@ -72,7 +73,8 @@ NvResult nvhost_as_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> i
 case 'A':
 switch (command.cmd) {
 case 0x8:
-return GetVARegions(input, output, inline_output);
+return WrapFixedInlOut(this, &nvhost_as_gpu::GetVARegions3, input, output,
+inline_output);
 default:
 break;
 }
@@ -87,10 +89,7 @@ NvResult nvhost_as_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> i
|
|||||||
void nvhost_as_gpu::OnOpen(DeviceFD fd) {}
|
void nvhost_as_gpu::OnOpen(DeviceFD fd) {}
|
||||||
void nvhost_as_gpu::OnClose(DeviceFD fd) {}
|
void nvhost_as_gpu::OnClose(DeviceFD fd) {}
|
||||||
|
|
||||||
NvResult nvhost_as_gpu::AllocAsEx(std::span<const u8> input, std::span<u8> output) {
|
NvResult nvhost_as_gpu::AllocAsEx(IoctlAllocAsEx& params) {
|
||||||
IoctlAllocAsEx params{};
|
|
||||||
std::memcpy(¶ms, input.data(), input.size());
|
|
||||||
|
|
||||||
LOG_DEBUG(Service_NVDRV, "called, big_page_size=0x{:X}", params.big_page_size);
|
LOG_DEBUG(Service_NVDRV, "called, big_page_size=0x{:X}", params.big_page_size);
|
||||||
|
|
||||||
std::scoped_lock lock(mutex);
|
std::scoped_lock lock(mutex);
|
||||||
@@ -141,10 +140,7 @@ NvResult nvhost_as_gpu::AllocAsEx(std::span<const u8> input, std::span<u8> outpu
|
|||||||
return NvResult::Success;
|
return NvResult::Success;
|
||||||
}
|
}
|
||||||
|
|
||||||
NvResult nvhost_as_gpu::AllocateSpace(std::span<const u8> input, std::span<u8> output) {
|
NvResult nvhost_as_gpu::AllocateSpace(IoctlAllocSpace& params) {
|
||||||
IoctlAllocSpace params{};
|
|
||||||
std::memcpy(¶ms, input.data(), input.size());
|
|
||||||
|
|
||||||
LOG_DEBUG(Service_NVDRV, "called, pages={:X}, page_size={:X}, flags={:X}", params.pages,
|
LOG_DEBUG(Service_NVDRV, "called, pages={:X}, page_size={:X}, flags={:X}", params.pages,
|
||||||
params.page_size, params.flags);
|
params.page_size, params.flags);
|
||||||
|
|
||||||
@@ -194,7 +190,6 @@ NvResult nvhost_as_gpu::AllocateSpace(std::span<const u8> input, std::span<u8> o
|
|||||||
.big_pages = params.page_size != VM::YUZU_PAGESIZE,
|
.big_pages = params.page_size != VM::YUZU_PAGESIZE,
|
||||||
};
|
};
|
||||||
|
|
||||||
std::memcpy(output.data(), ¶ms, output.size());
|
|
||||||
return NvResult::Success;
|
return NvResult::Success;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -222,10 +217,7 @@ void nvhost_as_gpu::FreeMappingLocked(u64 offset) {
|
|||||||
mapping_map.erase(offset);
|
mapping_map.erase(offset);
|
||||||
}
|
}
|
||||||
|
|
||||||
NvResult nvhost_as_gpu::FreeSpace(std::span<const u8> input, std::span<u8> output) {
|
NvResult nvhost_as_gpu::FreeSpace(IoctlFreeSpace& params) {
|
||||||
IoctlFreeSpace params{};
|
|
||||||
std::memcpy(¶ms, input.data(), input.size());
|
|
||||||
|
|
||||||
LOG_DEBUG(Service_NVDRV, "called, offset={:X}, pages={:X}, page_size={:X}", params.offset,
|
LOG_DEBUG(Service_NVDRV, "called, offset={:X}, pages={:X}, page_size={:X}", params.offset,
|
||||||
params.pages, params.page_size);
|
params.pages, params.page_size);
|
||||||
|
|
||||||
@@ -264,18 +256,11 @@ NvResult nvhost_as_gpu::FreeSpace(std::span<const u8> input, std::span<u8> outpu
|
|||||||
return NvResult::BadValue;
|
return NvResult::BadValue;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::memcpy(output.data(), ¶ms, output.size());
|
|
||||||
return NvResult::Success;
|
return NvResult::Success;
|
||||||
}
|
}
|
||||||
|
|
||||||
NvResult nvhost_as_gpu::Remap(std::span<const u8> input, std::span<u8> output) {
|
NvResult nvhost_as_gpu::Remap(std::span<IoctlRemapEntry> entries) {
|
||||||
const auto num_entries = input.size() / sizeof(IoctlRemapEntry);
|
LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", entries.size());
|
||||||
|
|
||||||
LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", num_entries);
|
|
||||||
|
|
||||||
std::scoped_lock lock(mutex);
|
|
||||||
entries.resize_destructive(num_entries);
|
|
||||||
std::memcpy(entries.data(), input.data(), input.size());
|
|
||||||
|
|
||||||
if (!vm.initialised) {
|
if (!vm.initialised) {
|
||||||
return NvResult::BadValue;
|
return NvResult::BadValue;
|
||||||
@@ -317,14 +302,10 @@ NvResult nvhost_as_gpu::Remap(std::span<const u8> input, std::span<u8> output) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
std::memcpy(output.data(), entries.data(), output.size());
|
|
||||||
return NvResult::Success;
|
return NvResult::Success;
|
||||||
}
|
}
|
||||||
|
|
||||||
NvResult nvhost_as_gpu::MapBufferEx(std::span<const u8> input, std::span<u8> output) {
|
NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
|
||||||
IoctlMapBufferEx params{};
|
|
||||||
std::memcpy(¶ms, input.data(), input.size());
|
|
||||||
|
|
||||||
LOG_DEBUG(Service_NVDRV,
|
LOG_DEBUG(Service_NVDRV,
|
||||||
"called, flags={:X}, nvmap_handle={:X}, buffer_offset={}, mapping_size={}"
|
"called, flags={:X}, nvmap_handle={:X}, buffer_offset={}, mapping_size={}"
|
||||||
", offset={}",
|
", offset={}",
|
||||||
@@ -421,14 +402,10 @@ NvResult nvhost_as_gpu::MapBufferEx(std::span<const u8> input, std::span<u8> out
|
|||||||
mapping_map[params.offset] = mapping;
|
mapping_map[params.offset] = mapping;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::memcpy(output.data(), ¶ms, output.size());
|
|
||||||
return NvResult::Success;
|
return NvResult::Success;
|
||||||
}
|
}
|
||||||
|
|
||||||
NvResult nvhost_as_gpu::UnmapBuffer(std::span<const u8> input, std::span<u8> output) {
|
NvResult nvhost_as_gpu::UnmapBuffer(IoctlUnmapBuffer& params) {
|
||||||
IoctlUnmapBuffer params{};
|
|
||||||
std::memcpy(¶ms, input.data(), input.size());
|
|
||||||
|
|
||||||
LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset);
|
LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset);
|
||||||
|
|
||||||
std::scoped_lock lock(mutex);
|
std::scoped_lock lock(mutex);
|
||||||
@@ -464,9 +441,7 @@ NvResult nvhost_as_gpu::UnmapBuffer(std::span<const u8> input, std::span<u8> out
|
|||||||
return NvResult::Success;
|
return NvResult::Success;
|
||||||
}
|
}
|
||||||
|
|
||||||
NvResult nvhost_as_gpu::BindChannel(std::span<const u8> input, std::span<u8> output) {
|
NvResult nvhost_as_gpu::BindChannel(IoctlBindChannel& params) {
|
||||||
IoctlBindChannel params{};
|
|
||||||
std::memcpy(¶ms, input.data(), input.size());
|
|
||||||
LOG_DEBUG(Service_NVDRV, "called, fd={:X}", params.fd);
|
LOG_DEBUG(Service_NVDRV, "called, fd={:X}", params.fd);
|
||||||
|
|
||||||
auto gpu_channel_device = module.GetDevice<nvhost_gpu>(params.fd);
|
auto gpu_channel_device = module.GetDevice<nvhost_gpu>(params.fd);
|
||||||
@@ -493,10 +468,7 @@ void nvhost_as_gpu::GetVARegionsImpl(IoctlGetVaRegions& params) {
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
NvResult nvhost_as_gpu::GetVARegions(std::span<const u8> input, std::span<u8> output) {
|
NvResult nvhost_as_gpu::GetVARegions1(IoctlGetVaRegions& params) {
|
||||||
IoctlGetVaRegions params{};
|
|
||||||
std::memcpy(¶ms, input.data(), input.size());
|
|
||||||
|
|
||||||
LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
|
LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
|
||||||
params.buf_size);
|
params.buf_size);
|
||||||
|
|
||||||
@@ -508,15 +480,10 @@ NvResult nvhost_as_gpu::GetVARegions(std::span<const u8> input, std::span<u8> ou
|
|||||||
|
|
||||||
GetVARegionsImpl(params);
|
GetVARegionsImpl(params);
|
||||||
|
|
||||||
std::memcpy(output.data(), ¶ms, output.size());
|
|
||||||
return NvResult::Success;
|
return NvResult::Success;
|
||||||
}
|
}
|
||||||
|
|
||||||
NvResult nvhost_as_gpu::GetVARegions(std::span<const u8> input, std::span<u8> output,
|
NvResult nvhost_as_gpu::GetVARegions3(IoctlGetVaRegions& params, std::span<VaRegion> regions) {
|
||||||
std::span<u8> inline_output) {
|
|
||||||
IoctlGetVaRegions params{};
|
|
||||||
std::memcpy(¶ms, input.data(), input.size());
|
|
||||||
|
|
||||||
LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
|
LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
|
||||||
params.buf_size);
|
params.buf_size);
|
||||||
|
|
||||||
@@ -528,9 +495,10 @@ NvResult nvhost_as_gpu::GetVARegions(std::span<const u8> input, std::span<u8> ou
|
|||||||
|
|
||||||
GetVARegionsImpl(params);
|
GetVARegionsImpl(params);
|
||||||
|
|
||||||
std::memcpy(output.data(), ¶ms, output.size());
|
const size_t num_regions = std::min(params.regions.size(), regions.size());
|
||||||
std::memcpy(inline_output.data(), ¶ms.regions[0], sizeof(VaRegion));
|
for (size_t i = 0; i < num_regions; i++) {
|
||||||
std::memcpy(inline_output.data() + sizeof(VaRegion), ¶ms.regions[1], sizeof(VaRegion));
|
regions[i] = params.regions[i];
|
||||||
|
}
|
||||||
|
|
||||||
return NvResult::Success;
|
return NvResult::Success;
|
||||||
}
|
}
|
||||||
|
|||||||
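The 1/3 suffixes follow the ioctl entry point each handler serves: GetVARegions1 backs the Ioctl1 path and only updates the fixed struct, while GetVARegions3 backs the Ioctl3 path and additionally fills a std::span<VaRegion> that the wrapper is expected to serialize into the inline output buffer. A rough sketch of the data flow this replaces, based on the removed manual code (the direct handler call here is illustrative; in the driver it goes through WrapFixedInlOut):

    // Sketch only: effective flow of the Ioctl3 'A'/0x8 (GetVARegions) path.
    IoctlGetVaRegions params{};
    std::memcpy(&params, input.data(), std::min(input.size(), sizeof(params)));

    std::array<VaRegion, 2> regions{};
    const NvResult result = GetVARegions3(params, regions); // fills params and regions

    std::memcpy(output.data(), &params, std::min(output.size(), sizeof(params)));
    std::memcpy(inline_output.data(), regions.data(),
                std::min(inline_output.size(), regions.size() * sizeof(VaRegion)));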
core/hle/service/nvdrv/devices/nvhost_as_gpu.h
@@ -139,18 +139,17 @@ private:
     static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(VaRegion) * 2,
                   "IoctlGetVaRegions is incorrect size");

-    NvResult AllocAsEx(std::span<const u8> input, std::span<u8> output);
-    NvResult AllocateSpace(std::span<const u8> input, std::span<u8> output);
-    NvResult Remap(std::span<const u8> input, std::span<u8> output);
-    NvResult MapBufferEx(std::span<const u8> input, std::span<u8> output);
-    NvResult UnmapBuffer(std::span<const u8> input, std::span<u8> output);
-    NvResult FreeSpace(std::span<const u8> input, std::span<u8> output);
-    NvResult BindChannel(std::span<const u8> input, std::span<u8> output);
+    NvResult AllocAsEx(IoctlAllocAsEx& params);
+    NvResult AllocateSpace(IoctlAllocSpace& params);
+    NvResult Remap(std::span<IoctlRemapEntry> params);
+    NvResult MapBufferEx(IoctlMapBufferEx& params);
+    NvResult UnmapBuffer(IoctlUnmapBuffer& params);
+    NvResult FreeSpace(IoctlFreeSpace& params);
+    NvResult BindChannel(IoctlBindChannel& params);

     void GetVARegionsImpl(IoctlGetVaRegions& params);
-    NvResult GetVARegions(std::span<const u8> input, std::span<u8> output);
-    NvResult GetVARegions(std::span<const u8> input, std::span<u8> output,
-                          std::span<u8> inline_output);
+    NvResult GetVARegions1(IoctlGetVaRegions& params);
+    NvResult GetVARegions3(IoctlGetVaRegions& params, std::span<VaRegion> regions);

     void FreeMappingLocked(u64 offset);

@@ -213,7 +212,6 @@ private:
         bool initialised{};
     } vm;
     std::shared_ptr<Tegra::MemoryManager> gmmu;
-    Common::ScratchBuffer<IoctlRemapEntry> entries;

     // s32 channel{};
     // u32 big_page_size{VM::DEFAULT_BIG_PAGE_SIZE};
core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
@@ -14,6 +14,7 @@
 #include "core/hle/kernel/k_event.h"
 #include "core/hle/service/nvdrv/core/container.h"
 #include "core/hle/service/nvdrv/core/syncpoint_manager.h"
+#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
 #include "core/hle/service/nvdrv/devices/nvhost_ctrl.h"
 #include "video_core/gpu.h"
 #include "video_core/host1x/host1x.h"
@@ -40,19 +41,19 @@ NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inp
     case 0x0:
         switch (command.cmd) {
         case 0x1b:
-            return NvOsGetConfigU32(input, output);
+            return WrapFixed(this, &nvhost_ctrl::NvOsGetConfigU32, input, output);
         case 0x1c:
-            return IocCtrlClearEventWait(input, output);
+            return WrapFixed(this, &nvhost_ctrl::IocCtrlClearEventWait, input, output);
         case 0x1d:
-            return IocCtrlEventWait(input, output, true);
+            return WrapFixed(this, &nvhost_ctrl::IocCtrlEventWait, input, output, true);
         case 0x1e:
-            return IocCtrlEventWait(input, output, false);
+            return WrapFixed(this, &nvhost_ctrl::IocCtrlEventWait, input, output, false);
         case 0x1f:
-            return IocCtrlEventRegister(input, output);
+            return WrapFixed(this, &nvhost_ctrl::IocCtrlEventRegister, input, output);
         case 0x20:
-            return IocCtrlEventUnregister(input, output);
+            return WrapFixed(this, &nvhost_ctrl::IocCtrlEventUnregister, input, output);
         case 0x21:
-            return IocCtrlEventUnregisterBatch(input, output);
+            return WrapFixed(this, &nvhost_ctrl::IocCtrlEventUnregisterBatch, input, output);
         }
         break;
     default:
@@ -79,25 +80,19 @@ void nvhost_ctrl::OnOpen(DeviceFD fd) {}

 void nvhost_ctrl::OnClose(DeviceFD fd) {}

-NvResult nvhost_ctrl::NvOsGetConfigU32(std::span<const u8> input, std::span<u8> output) {
-    IocGetConfigParams params{};
-    std::memcpy(&params, input.data(), sizeof(params));
+NvResult nvhost_ctrl::NvOsGetConfigU32(IocGetConfigParams& params) {
     LOG_TRACE(Service_NVDRV, "called, setting={}!{}", params.domain_str.data(),
               params.param_str.data());
     return NvResult::ConfigVarNotFound; // Returns error on production mode
 }

-NvResult nvhost_ctrl::IocCtrlEventWait(std::span<const u8> input, std::span<u8> output,
-                                       bool is_allocation) {
-    IocCtrlEventWaitParams params{};
-    std::memcpy(&params, input.data(), sizeof(params));
+NvResult nvhost_ctrl::IocCtrlEventWait(IocCtrlEventWaitParams& params, bool is_allocation) {
     LOG_DEBUG(Service_NVDRV, "syncpt_id={}, threshold={}, timeout={}, is_allocation={}",
               params.fence.id, params.fence.value, params.timeout, is_allocation);

     bool must_unmark_fail = !is_allocation;
     const u32 event_id = params.value.raw;
     SCOPE_EXIT({
-        std::memcpy(output.data(), &params, sizeof(params));
         if (must_unmark_fail) {
             events[event_id].fails = 0;
         }
@@ -231,9 +226,7 @@ NvResult nvhost_ctrl::FreeEvent(u32 slot) {
     return NvResult::Success;
 }

-NvResult nvhost_ctrl::IocCtrlEventRegister(std::span<const u8> input, std::span<u8> output) {
-    IocCtrlEventRegisterParams params{};
-    std::memcpy(&params, input.data(), sizeof(params));
+NvResult nvhost_ctrl::IocCtrlEventRegister(IocCtrlEventRegisterParams& params) {
     const u32 event_id = params.user_event_id;
     LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id);
     if (event_id >= MaxNvEvents) {
@@ -252,9 +245,7 @@ NvResult nvhost_ctrl::IocCtrlEventRegister(std::span<const u8> input, std::span<
     return NvResult::Success;
 }

-NvResult nvhost_ctrl::IocCtrlEventUnregister(std::span<const u8> input, std::span<u8> output) {
-    IocCtrlEventUnregisterParams params{};
-    std::memcpy(&params, input.data(), sizeof(params));
+NvResult nvhost_ctrl::IocCtrlEventUnregister(IocCtrlEventUnregisterParams& params) {
     const u32 event_id = params.user_event_id & 0x00FF;
     LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id);

@@ -262,9 +253,7 @@ NvResult nvhost_ctrl::IocCtrlEventUnregister(std::span<const u8> input, std::spa
     return FreeEvent(event_id);
 }

-NvResult nvhost_ctrl::IocCtrlEventUnregisterBatch(std::span<const u8> input, std::span<u8> output) {
-    IocCtrlEventUnregisterBatchParams params{};
-    std::memcpy(&params, input.data(), sizeof(params));
+NvResult nvhost_ctrl::IocCtrlEventUnregisterBatch(IocCtrlEventUnregisterBatchParams& params) {
     u64 event_mask = params.user_events;
     LOG_DEBUG(Service_NVDRV, " called, event_mask: {:X}", event_mask);

@@ -280,10 +269,7 @@ NvResult nvhost_ctrl::IocCtrlEventUnregisterBatch(std::span<const u8> input, std
     return NvResult::Success;
 }

-NvResult nvhost_ctrl::IocCtrlClearEventWait(std::span<const u8> input, std::span<u8> output) {
-    IocCtrlEventClearParams params{};
-    std::memcpy(&params, input.data(), sizeof(params));
-
+NvResult nvhost_ctrl::IocCtrlClearEventWait(IocCtrlEventClearParams& params) {
     u32 event_id = params.event_id.slot;
     LOG_DEBUG(Service_NVDRV, "called, event_id: {:X}", event_id);

core/hle/service/nvdrv/devices/nvhost_ctrl.h
@@ -186,12 +186,12 @@ private:
     static_assert(sizeof(IocCtrlEventUnregisterBatchParams) == 8,
                   "IocCtrlEventKill is incorrect size");

-    NvResult NvOsGetConfigU32(std::span<const u8> input, std::span<u8> output);
-    NvResult IocCtrlEventWait(std::span<const u8> input, std::span<u8> output, bool is_allocation);
-    NvResult IocCtrlEventRegister(std::span<const u8> input, std::span<u8> output);
-    NvResult IocCtrlEventUnregister(std::span<const u8> input, std::span<u8> output);
-    NvResult IocCtrlEventUnregisterBatch(std::span<const u8> input, std::span<u8> output);
-    NvResult IocCtrlClearEventWait(std::span<const u8> input, std::span<u8> output);
+    NvResult NvOsGetConfigU32(IocGetConfigParams& params);
+    NvResult IocCtrlEventRegister(IocCtrlEventRegisterParams& params);
+    NvResult IocCtrlEventUnregister(IocCtrlEventUnregisterParams& params);
+    NvResult IocCtrlEventUnregisterBatch(IocCtrlEventUnregisterBatchParams& params);
+    NvResult IocCtrlEventWait(IocCtrlEventWaitParams& params, bool is_allocation);
+    NvResult IocCtrlClearEventWait(IocCtrlEventClearParams& params);

     NvResult FreeEvent(u32 slot);

core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
@@ -6,6 +6,7 @@
 #include "common/logging/log.h"
 #include "core/core.h"
 #include "core/core_timing.h"
+#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
 #include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h"
 #include "core/hle/service/nvdrv/nvdrv.h"

@@ -27,23 +28,23 @@ NvResult nvhost_ctrl_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8>
     case 'G':
         switch (command.cmd) {
         case 0x1:
-            return ZCullGetCtxSize(input, output);
+            return WrapFixed(this, &nvhost_ctrl_gpu::ZCullGetCtxSize, input, output);
         case 0x2:
-            return ZCullGetInfo(input, output);
+            return WrapFixed(this, &nvhost_ctrl_gpu::ZCullGetInfo, input, output);
         case 0x3:
-            return ZBCSetTable(input, output);
+            return WrapFixed(this, &nvhost_ctrl_gpu::ZBCSetTable, input, output);
         case 0x4:
-            return ZBCQueryTable(input, output);
+            return WrapFixed(this, &nvhost_ctrl_gpu::ZBCQueryTable, input, output);
         case 0x5:
-            return GetCharacteristics(input, output);
+            return WrapFixed(this, &nvhost_ctrl_gpu::GetCharacteristics1, input, output);
         case 0x6:
-            return GetTPCMasks(input, output);
+            return WrapFixed(this, &nvhost_ctrl_gpu::GetTPCMasks1, input, output);
         case 0x7:
-            return FlushL2(input, output);
+            return WrapFixed(this, &nvhost_ctrl_gpu::FlushL2, input, output);
         case 0x14:
-            return GetActiveSlotMask(input, output);
+            return WrapFixed(this, &nvhost_ctrl_gpu::GetActiveSlotMask, input, output);
         case 0x1c:
-            return GetGpuTime(input, output);
+            return WrapFixed(this, &nvhost_ctrl_gpu::GetGpuTime, input, output);
         default:
             break;
         }
@@ -65,9 +66,11 @@ NvResult nvhost_ctrl_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8>
     case 'G':
         switch (command.cmd) {
         case 0x5:
-            return GetCharacteristics(input, output, inline_output);
+            return WrapFixedInlOut(this, &nvhost_ctrl_gpu::GetCharacteristics3, input, output,
+                                   inline_output);
         case 0x6:
-            return GetTPCMasks(input, output, inline_output);
+            return WrapFixedInlOut(this, &nvhost_ctrl_gpu::GetTPCMasks3, input, output,
+                                   inline_output);
         default:
             break;
         }
@@ -82,10 +85,8 @@ NvResult nvhost_ctrl_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8>
 void nvhost_ctrl_gpu::OnOpen(DeviceFD fd) {}
 void nvhost_ctrl_gpu::OnClose(DeviceFD fd) {}

-NvResult nvhost_ctrl_gpu::GetCharacteristics(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_ctrl_gpu::GetCharacteristics1(IoctlCharacteristics& params) {
     LOG_DEBUG(Service_NVDRV, "called");
-    IoctlCharacteristics params{};
-    std::memcpy(&params, input.data(), input.size());
     params.gc.arch = 0x120;
     params.gc.impl = 0xb;
     params.gc.rev = 0xa1;
@@ -123,15 +124,13 @@ NvResult nvhost_ctrl_gpu::GetCharacteristics(std::span<const u8> input, std::spa
     params.gc.gr_compbit_store_base_hw = 0x0;
     params.gpu_characteristics_buf_size = 0xA0;
     params.gpu_characteristics_buf_addr = 0xdeadbeef; // Cannot be 0 (UNUSED)
-    std::memcpy(output.data(), &params, output.size());
     return NvResult::Success;
 }

-NvResult nvhost_ctrl_gpu::GetCharacteristics(std::span<const u8> input, std::span<u8> output,
-                                             std::span<u8> inline_output) {
+NvResult nvhost_ctrl_gpu::GetCharacteristics3(
+    IoctlCharacteristics& params, std::span<IoctlGpuCharacteristics> gpu_characteristics) {
     LOG_DEBUG(Service_NVDRV, "called");
-    IoctlCharacteristics params{};
-    std::memcpy(&params, input.data(), input.size());
     params.gc.arch = 0x120;
     params.gc.impl = 0xb;
     params.gc.rev = 0xa1;
@@ -169,70 +168,47 @@ NvResult nvhost_ctrl_gpu::GetCharacteristics(std::span<const u8> input, std::spa
     params.gc.gr_compbit_store_base_hw = 0x0;
     params.gpu_characteristics_buf_size = 0xA0;
     params.gpu_characteristics_buf_addr = 0xdeadbeef; // Cannot be 0 (UNUSED)
-    std::memcpy(output.data(), &params, output.size());
-    std::memcpy(inline_output.data(), &params.gc, inline_output.size());
+    if (!gpu_characteristics.empty()) {
+        gpu_characteristics.front() = params.gc;
+    }
     return NvResult::Success;
 }

-NvResult nvhost_ctrl_gpu::GetTPCMasks(std::span<const u8> input, std::span<u8> output) {
-    IoctlGpuGetTpcMasksArgs params{};
-    std::memcpy(&params, input.data(), input.size());
+NvResult nvhost_ctrl_gpu::GetTPCMasks1(IoctlGpuGetTpcMasksArgs& params) {
     LOG_DEBUG(Service_NVDRV, "called, mask_buffer_size=0x{:X}", params.mask_buffer_size);
     if (params.mask_buffer_size != 0) {
         params.tcp_mask = 3;
     }
-    std::memcpy(output.data(), &params, output.size());
     return NvResult::Success;
 }

-NvResult nvhost_ctrl_gpu::GetTPCMasks(std::span<const u8> input, std::span<u8> output,
-                                      std::span<u8> inline_output) {
-    IoctlGpuGetTpcMasksArgs params{};
-    std::memcpy(&params, input.data(), input.size());
+NvResult nvhost_ctrl_gpu::GetTPCMasks3(IoctlGpuGetTpcMasksArgs& params, std::span<u32> tpc_mask) {
     LOG_DEBUG(Service_NVDRV, "called, mask_buffer_size=0x{:X}", params.mask_buffer_size);
     if (params.mask_buffer_size != 0) {
         params.tcp_mask = 3;
     }
-    std::memcpy(output.data(), &params, output.size());
-    std::memcpy(inline_output.data(), &params.tcp_mask, inline_output.size());
+    if (!tpc_mask.empty()) {
+        tpc_mask.front() = params.tcp_mask;
+    }
     return NvResult::Success;
 }

-NvResult nvhost_ctrl_gpu::GetActiveSlotMask(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_ctrl_gpu::GetActiveSlotMask(IoctlActiveSlotMask& params) {
     LOG_DEBUG(Service_NVDRV, "called");

-    IoctlActiveSlotMask params{};
-    if (input.size() > 0) {
-        std::memcpy(&params, input.data(), input.size());
-    }
     params.slot = 0x07;
     params.mask = 0x01;
-    std::memcpy(output.data(), &params, output.size());
     return NvResult::Success;
 }

-NvResult nvhost_ctrl_gpu::ZCullGetCtxSize(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_ctrl_gpu::ZCullGetCtxSize(IoctlZcullGetCtxSize& params) {
     LOG_DEBUG(Service_NVDRV, "called");

-    IoctlZcullGetCtxSize params{};
-    if (input.size() > 0) {
-        std::memcpy(&params, input.data(), input.size());
-    }
     params.size = 0x1;
-    std::memcpy(output.data(), &params, output.size());
     return NvResult::Success;
 }

-NvResult nvhost_ctrl_gpu::ZCullGetInfo(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_ctrl_gpu::ZCullGetInfo(IoctlNvgpuGpuZcullGetInfoArgs& params) {
     LOG_DEBUG(Service_NVDRV, "called");

-    IoctlNvgpuGpuZcullGetInfoArgs params{};
-
-    if (input.size() > 0) {
-        std::memcpy(&params, input.data(), input.size());
-    }
-
     params.width_align_pixels = 0x20;
     params.height_align_pixels = 0x20;
     params.pixel_squares_by_aliquots = 0x400;
@@ -243,53 +219,28 @@ NvResult nvhost_ctrl_gpu::ZCullGetInfo(std::span<const u8> input, std::span<u8>
     params.subregion_width_align_pixels = 0x20;
     params.subregion_height_align_pixels = 0x40;
     params.subregion_count = 0x10;
-    std::memcpy(output.data(), &params, output.size());
     return NvResult::Success;
 }

-NvResult nvhost_ctrl_gpu::ZBCSetTable(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_ctrl_gpu::ZBCSetTable(IoctlZbcSetTable& params) {
     LOG_WARNING(Service_NVDRV, "(STUBBED) called");

-    IoctlZbcSetTable params{};
-    std::memcpy(&params, input.data(), input.size());
     // TODO(ogniK): What does this even actually do?
-
-    // Prevent null pointer being passed as arg 1
-    if (output.empty()) {
-        LOG_WARNING(Service_NVDRV, "Avoiding passing null pointer to memcpy");
-    } else {
-        std::memcpy(output.data(), &params, output.size());
-    }
     return NvResult::Success;
 }

-NvResult nvhost_ctrl_gpu::ZBCQueryTable(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_ctrl_gpu::ZBCQueryTable(IoctlZbcQueryTable& params) {
     LOG_WARNING(Service_NVDRV, "(STUBBED) called");

-    IoctlZbcQueryTable params{};
-    std::memcpy(&params, input.data(), input.size());
-    // TODO : To implement properly
-    std::memcpy(output.data(), &params, output.size());
     return NvResult::Success;
 }

-NvResult nvhost_ctrl_gpu::FlushL2(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_ctrl_gpu::FlushL2(IoctlFlushL2& params) {
     LOG_WARNING(Service_NVDRV, "(STUBBED) called");

-    IoctlFlushL2 params{};
-    std::memcpy(&params, input.data(), input.size());
-    // TODO : To implement properly
-    std::memcpy(output.data(), &params, output.size());
     return NvResult::Success;
 }

-NvResult nvhost_ctrl_gpu::GetGpuTime(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_ctrl_gpu::GetGpuTime(IoctlGetGpuTime& params) {
     LOG_DEBUG(Service_NVDRV, "called");

-    IoctlGetGpuTime params{};
-    std::memcpy(&params, input.data(), input.size());
     params.gpu_time = static_cast<u64_le>(system.CoreTiming().GetGlobalTimeNs().count());
-    std::memcpy(output.data(), &params, output.size());
     return NvResult::Success;
 }

core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
@@ -151,21 +151,20 @@ private:
     };
     static_assert(sizeof(IoctlGetGpuTime) == 0x10, "IoctlGetGpuTime is incorrect size");

-    NvResult GetCharacteristics(std::span<const u8> input, std::span<u8> output);
-    NvResult GetCharacteristics(std::span<const u8> input, std::span<u8> output,
-                                std::span<u8> inline_output);
+    NvResult GetCharacteristics1(IoctlCharacteristics& params);
+    NvResult GetCharacteristics3(IoctlCharacteristics& params,
+                                 std::span<IoctlGpuCharacteristics> gpu_characteristics);

-    NvResult GetTPCMasks(std::span<const u8> input, std::span<u8> output);
-    NvResult GetTPCMasks(std::span<const u8> input, std::span<u8> output,
-                         std::span<u8> inline_output);
+    NvResult GetTPCMasks1(IoctlGpuGetTpcMasksArgs& params);
+    NvResult GetTPCMasks3(IoctlGpuGetTpcMasksArgs& params, std::span<u32> tpc_mask);

-    NvResult GetActiveSlotMask(std::span<const u8> input, std::span<u8> output);
-    NvResult ZCullGetCtxSize(std::span<const u8> input, std::span<u8> output);
-    NvResult ZCullGetInfo(std::span<const u8> input, std::span<u8> output);
-    NvResult ZBCSetTable(std::span<const u8> input, std::span<u8> output);
-    NvResult ZBCQueryTable(std::span<const u8> input, std::span<u8> output);
-    NvResult FlushL2(std::span<const u8> input, std::span<u8> output);
-    NvResult GetGpuTime(std::span<const u8> input, std::span<u8> output);
+    NvResult GetActiveSlotMask(IoctlActiveSlotMask& params);
+    NvResult ZCullGetCtxSize(IoctlZcullGetCtxSize& params);
+    NvResult ZCullGetInfo(IoctlNvgpuGpuZcullGetInfoArgs& params);
+    NvResult ZBCSetTable(IoctlZbcSetTable& params);
+    NvResult ZBCQueryTable(IoctlZbcQueryTable& params);
+    NvResult FlushL2(IoctlFlushL2& params);
+    NvResult GetGpuTime(IoctlGetGpuTime& params);

     EventInterface& events_interface;

core/hle/service/nvdrv/devices/nvhost_gpu.cpp
@@ -8,6 +8,7 @@
 #include "core/hle/service/nvdrv/core/container.h"
 #include "core/hle/service/nvdrv/core/nvmap.h"
 #include "core/hle/service/nvdrv/core/syncpoint_manager.h"
+#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
 #include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
 #include "core/hle/service/nvdrv/nvdrv.h"
 #include "core/memory.h"
@@ -52,7 +53,7 @@ NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu
     case 0x0:
         switch (command.cmd) {
         case 0x3:
-            return GetWaitbase(input, output);
+            return WrapFixed(this, &nvhost_gpu::GetWaitbase, input, output);
         default:
             break;
         }
@@ -60,25 +61,25 @@ NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu
     case 'H':
         switch (command.cmd) {
         case 0x1:
-            return SetNVMAPfd(input, output);
+            return WrapFixed(this, &nvhost_gpu::SetNVMAPfd, input, output);
         case 0x3:
-            return ChannelSetTimeout(input, output);
+            return WrapFixed(this, &nvhost_gpu::ChannelSetTimeout, input, output);
         case 0x8:
-            return SubmitGPFIFOBase(input, output, false);
+            return WrapFixedVariable(this, &nvhost_gpu::SubmitGPFIFOBase1, input, output, false);
         case 0x9:
-            return AllocateObjectContext(input, output);
+            return WrapFixed(this, &nvhost_gpu::AllocateObjectContext, input, output);
         case 0xb:
-            return ZCullBind(input, output);
+            return WrapFixed(this, &nvhost_gpu::ZCullBind, input, output);
         case 0xc:
-            return SetErrorNotifier(input, output);
+            return WrapFixed(this, &nvhost_gpu::SetErrorNotifier, input, output);
         case 0xd:
-            return SetChannelPriority(input, output);
+            return WrapFixed(this, &nvhost_gpu::SetChannelPriority, input, output);
         case 0x1a:
-            return AllocGPFIFOEx2(input, output);
+            return WrapFixed(this, &nvhost_gpu::AllocGPFIFOEx2, input, output);
         case 0x1b:
-            return SubmitGPFIFOBase(input, output, true);
+            return WrapFixedVariable(this, &nvhost_gpu::SubmitGPFIFOBase1, input, output, true);
         case 0x1d:
-            return ChannelSetTimeslice(input, output);
+            return WrapFixed(this, &nvhost_gpu::ChannelSetTimeslice, input, output);
         default:
             break;
         }
@@ -86,9 +87,9 @@ NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu
     case 'G':
         switch (command.cmd) {
         case 0x14:
-            return SetClientData(input, output);
+            return WrapFixed(this, &nvhost_gpu::SetClientData, input, output);
         case 0x15:
-            return GetClientData(input, output);
+            return WrapFixed(this, &nvhost_gpu::GetClientData, input, output);
         default:
             break;
         }
@@ -104,7 +105,8 @@ NvResult nvhost_gpu::Ioctl2(DeviceFD fd, Ioctl command, std::span<const u8> inpu
     case 'H':
         switch (command.cmd) {
         case 0x1b:
-            return SubmitGPFIFOBase(input, inline_input, output);
+            return WrapFixedInlIn(this, &nvhost_gpu::SubmitGPFIFOBase2, input, inline_input,
+                                  output);
         }
         break;
     }
@@ -121,63 +123,45 @@ NvResult nvhost_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inpu
 void nvhost_gpu::OnOpen(DeviceFD fd) {}
 void nvhost_gpu::OnClose(DeviceFD fd) {}

-NvResult nvhost_gpu::SetNVMAPfd(std::span<const u8> input, std::span<u8> output) {
-    IoctlSetNvmapFD params{};
-    std::memcpy(&params, input.data(), input.size());
+NvResult nvhost_gpu::SetNVMAPfd(IoctlSetNvmapFD& params) {
     LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd);

     nvmap_fd = params.nvmap_fd;
     return NvResult::Success;
 }

-NvResult nvhost_gpu::SetClientData(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_gpu::SetClientData(IoctlClientData& params) {
     LOG_DEBUG(Service_NVDRV, "called");

-    IoctlClientData params{};
-    std::memcpy(&params, input.data(), input.size());
     user_data = params.data;
     return NvResult::Success;
 }

-NvResult nvhost_gpu::GetClientData(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_gpu::GetClientData(IoctlClientData& params) {
     LOG_DEBUG(Service_NVDRV, "called");

-    IoctlClientData params{};
-    std::memcpy(&params, input.data(), input.size());
     params.data = user_data;
-    std::memcpy(output.data(), &params, output.size());
     return NvResult::Success;
 }

-NvResult nvhost_gpu::ZCullBind(std::span<const u8> input, std::span<u8> output) {
-    std::memcpy(&zcull_params, input.data(), input.size());
+NvResult nvhost_gpu::ZCullBind(IoctlZCullBind& params) {
+    zcull_params = params;
     LOG_DEBUG(Service_NVDRV, "called, gpu_va={:X}, mode={:X}", zcull_params.gpu_va,
               zcull_params.mode);

-    std::memcpy(output.data(), &zcull_params, output.size());
     return NvResult::Success;
 }

-NvResult nvhost_gpu::SetErrorNotifier(std::span<const u8> input, std::span<u8> output) {
-    IoctlSetErrorNotifier params{};
-    std::memcpy(&params, input.data(), input.size());
+NvResult nvhost_gpu::SetErrorNotifier(IoctlSetErrorNotifier& params) {
     LOG_WARNING(Service_NVDRV, "(STUBBED) called, offset={:X}, size={:X}, mem={:X}", params.offset,
                 params.size, params.mem);

-    std::memcpy(output.data(), &params, output.size());
     return NvResult::Success;
 }

-NvResult nvhost_gpu::SetChannelPriority(std::span<const u8> input, std::span<u8> output) {
-    std::memcpy(&channel_priority, input.data(), input.size());
+NvResult nvhost_gpu::SetChannelPriority(IoctlChannelSetPriority& params) {
+    channel_priority = params.priority;
     LOG_DEBUG(Service_NVDRV, "(STUBBED) called, priority={:X}", channel_priority);

     return NvResult::Success;
 }

-NvResult nvhost_gpu::AllocGPFIFOEx2(std::span<const u8> input, std::span<u8> output) {
-    IoctlAllocGpfifoEx2 params{};
-    std::memcpy(&params, input.data(), input.size());
+NvResult nvhost_gpu::AllocGPFIFOEx2(IoctlAllocGpfifoEx2& params) {
     LOG_WARNING(Service_NVDRV,
                 "(STUBBED) called, num_entries={:X}, flags={:X}, unk0={:X}, "
                 "unk1={:X}, unk2={:X}, unk3={:X}",
@@ -193,18 +177,14 @@ NvResult nvhost_gpu::AllocGPFIFOEx2(std::span<const u8> input, std::span<u8> out

     params.fence_out = syncpoint_manager.GetSyncpointFence(channel_syncpoint);

-    std::memcpy(output.data(), &params, output.size());
     return NvResult::Success;
 }

-NvResult nvhost_gpu::AllocateObjectContext(std::span<const u8> input, std::span<u8> output) {
-    IoctlAllocObjCtx params{};
-    std::memcpy(&params, input.data(), input.size());
+NvResult nvhost_gpu::AllocateObjectContext(IoctlAllocObjCtx& params) {
     LOG_WARNING(Service_NVDRV, "(STUBBED) called, class_num={:X}, flags={:X}", params.class_num,
                 params.flags);

     params.obj_id = 0x0;
-    std::memcpy(output.data(), &params, output.size());
     return NvResult::Success;
 }

@@ -248,8 +228,7 @@ static boost::container::small_vector<Tegra::CommandHeader, 512> BuildIncrementW
     return result;
 }

-NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::span<u8> output,
-                                      Tegra::CommandList&& entries) {
+NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, Tegra::CommandList&& entries) {
     LOG_TRACE(Service_NVDRV, "called, gpfifo={:X}, num_entries={:X}, flags={:X}", params.address,
               params.num_entries, params.flags.raw);

@@ -290,65 +269,55 @@ NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::span<u8> o

     flags.raw = 0;

-    std::memcpy(output.data(), &params, sizeof(IoctlSubmitGpfifo));
     return NvResult::Success;
 }

-NvResult nvhost_gpu::SubmitGPFIFOBase(std::span<const u8> input, std::span<u8> output,
-                                      bool kickoff) {
-    if (input.size() < sizeof(IoctlSubmitGpfifo)) {
+NvResult nvhost_gpu::SubmitGPFIFOBase1(IoctlSubmitGpfifo& params,
+                                       std::span<Tegra::CommandListHeader> commands, bool kickoff) {
+    if (params.num_entries > commands.size()) {
         UNIMPLEMENTED();
         return NvResult::InvalidSize;
     }
-    IoctlSubmitGpfifo params{};
-    std::memcpy(&params, input.data(), sizeof(IoctlSubmitGpfifo));
-    Tegra::CommandList entries(params.num_entries);

+    Tegra::CommandList entries(params.num_entries);
     if (kickoff) {
         system.ApplicationMemory().ReadBlock(params.address, entries.command_lists.data(),
                                              params.num_entries * sizeof(Tegra::CommandListHeader));
     } else {
-        std::memcpy(entries.command_lists.data(), &input[sizeof(IoctlSubmitGpfifo)],
+        std::memcpy(entries.command_lists.data(), commands.data(),
                     params.num_entries * sizeof(Tegra::CommandListHeader));
     }

-    return SubmitGPFIFOImpl(params, output, std::move(entries));
+    return SubmitGPFIFOImpl(params, std::move(entries));
 }

-NvResult nvhost_gpu::SubmitGPFIFOBase(std::span<const u8> input, std::span<const u8> input_inline,
-                                      std::span<u8> output) {
-    if (input.size() < sizeof(IoctlSubmitGpfifo)) {
+NvResult nvhost_gpu::SubmitGPFIFOBase2(IoctlSubmitGpfifo& params,
+                                       std::span<const Tegra::CommandListHeader> commands) {
+    if (params.num_entries > commands.size()) {
         UNIMPLEMENTED();
         return NvResult::InvalidSize;
     }
-    IoctlSubmitGpfifo params{};
-    std::memcpy(&params, input.data(), sizeof(IoctlSubmitGpfifo));
     Tegra::CommandList entries(params.num_entries);
-    std::memcpy(entries.command_lists.data(), input_inline.data(), input_inline.size());
-    return SubmitGPFIFOImpl(params, output, std::move(entries));
+    std::memcpy(entries.command_lists.data(), commands.data(),
+                params.num_entries * sizeof(Tegra::CommandListHeader));
+    return SubmitGPFIFOImpl(params, std::move(entries));
 }

-NvResult nvhost_gpu::GetWaitbase(std::span<const u8> input, std::span<u8> output) {
-    IoctlGetWaitbase params{};
-    std::memcpy(&params, input.data(), sizeof(IoctlGetWaitbase));
+NvResult nvhost_gpu::GetWaitbase(IoctlGetWaitbase& params) {
     LOG_INFO(Service_NVDRV, "called, unknown=0x{:X}", params.unknown);

     params.value = 0; // Seems to be hard coded at 0
-    std::memcpy(output.data(), &params, output.size());
     return NvResult::Success;
 }

-NvResult nvhost_gpu::ChannelSetTimeout(std::span<const u8> input, std::span<u8> output) {
-    IoctlChannelSetTimeout params{};
-    std::memcpy(&params, input.data(), sizeof(IoctlChannelSetTimeout));
+NvResult nvhost_gpu::ChannelSetTimeout(IoctlChannelSetTimeout& params) {
     LOG_INFO(Service_NVDRV, "called, timeout=0x{:X}", params.timeout);

     return NvResult::Success;
 }

-NvResult nvhost_gpu::ChannelSetTimeslice(std::span<const u8> input, std::span<u8> output) {
-    IoctlSetTimeslice params{};
-    std::memcpy(&params, input.data(), sizeof(IoctlSetTimeslice));
+NvResult nvhost_gpu::ChannelSetTimeslice(IoctlSetTimeslice& params) {
     LOG_INFO(Service_NVDRV, "called, timeslice=0x{:X}", params.timeslice);

     channel_timeslice = params.timeslice;
core/hle/service/nvdrv/devices/nvhost_gpu.h
@@ -186,23 +186,24 @@ private:
     u32_le channel_priority{};
     u32_le channel_timeslice{};

-    NvResult SetNVMAPfd(std::span<const u8> input, std::span<u8> output);
-    NvResult SetClientData(std::span<const u8> input, std::span<u8> output);
-    NvResult GetClientData(std::span<const u8> input, std::span<u8> output);
-    NvResult ZCullBind(std::span<const u8> input, std::span<u8> output);
-    NvResult SetErrorNotifier(std::span<const u8> input, std::span<u8> output);
-    NvResult SetChannelPriority(std::span<const u8> input, std::span<u8> output);
-    NvResult AllocGPFIFOEx2(std::span<const u8> input, std::span<u8> output);
-    NvResult AllocateObjectContext(std::span<const u8> input, std::span<u8> output);
-    NvResult SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::span<u8> output,
-                              Tegra::CommandList&& entries);
-    NvResult SubmitGPFIFOBase(std::span<const u8> input, std::span<u8> output,
-                              bool kickoff = false);
-    NvResult SubmitGPFIFOBase(std::span<const u8> input, std::span<const u8> input_inline,
-                              std::span<u8> output);
-    NvResult GetWaitbase(std::span<const u8> input, std::span<u8> output);
-    NvResult ChannelSetTimeout(std::span<const u8> input, std::span<u8> output);
-    NvResult ChannelSetTimeslice(std::span<const u8> input, std::span<u8> output);
+    NvResult SetNVMAPfd(IoctlSetNvmapFD& params);
+    NvResult SetClientData(IoctlClientData& params);
+    NvResult GetClientData(IoctlClientData& params);
+    NvResult ZCullBind(IoctlZCullBind& params);
+    NvResult SetErrorNotifier(IoctlSetErrorNotifier& params);
+    NvResult SetChannelPriority(IoctlChannelSetPriority& params);
+    NvResult AllocGPFIFOEx2(IoctlAllocGpfifoEx2& params);
+    NvResult AllocateObjectContext(IoctlAllocObjCtx& params);
+    NvResult SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, Tegra::CommandList&& entries);
+    NvResult SubmitGPFIFOBase1(IoctlSubmitGpfifo& params,
+                               std::span<Tegra::CommandListHeader> commands, bool kickoff = false);
+    NvResult SubmitGPFIFOBase2(IoctlSubmitGpfifo& params,
+                               std::span<const Tegra::CommandListHeader> commands);
+    NvResult GetWaitbase(IoctlGetWaitbase& params);
+    NvResult ChannelSetTimeout(IoctlChannelSetTimeout& params);
+    NvResult ChannelSetTimeslice(IoctlSetTimeslice& params);

     EventInterface& events_interface;
     NvCore::Container& core;
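The two submit paths declared above only differ in where the GPFIFO entries come from: SubmitGPFIFOBase1 (Ioctl1, commands 0x8/0x1b) takes them from the variable-length tail of the ioctl input buffer, while SubmitGPFIFOBase2 (Ioctl2, command 0x1b) takes them from the separate inline input buffer. A sketch of the buffer layout the Ioctl1 path used to parse by hand, which is presumably what WrapFixedVariable now splits on the handler's behalf (local names are illustrative):

    // Sketch only: Ioctl1 submit buffer layout per the removed manual parsing.
    //   input = [ IoctlSubmitGpfifo fixed header ][ Tegra::CommandListHeader x num_entries ]
    IoctlSubmitGpfifo params{};
    std::memcpy(&params, input.data(), sizeof(IoctlSubmitGpfifo));
    const auto* entry_ptr = reinterpret_cast<const Tegra::CommandListHeader*>(
        input.data() + sizeof(IoctlSubmitGpfifo));
    // entry_ptr[0 .. params.num_entries) is what SubmitGPFIFOBase1 now receives as its span.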
core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
@@ -6,6 +6,7 @@
 #include "common/logging/log.h"
 #include "core/core.h"
 #include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
 #include "core/hle/service/nvdrv/devices/nvhost_nvdec.h"
 #include "video_core/renderer_base.h"

@@ -25,18 +26,18 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> in
             if (!host1x_file.fd_to_id.contains(fd)) {
                 host1x_file.fd_to_id[fd] = host1x_file.nvdec_next_id++;
             }
-            return Submit(fd, input, output);
+            return WrapFixedVariable(this, &nvhost_nvdec::Submit, input, output, fd);
         }
         case 0x2:
-            return GetSyncpoint(input, output);
+            return WrapFixed(this, &nvhost_nvdec::GetSyncpoint, input, output);
         case 0x3:
-            return GetWaitbase(input, output);
+            return WrapFixed(this, &nvhost_nvdec::GetWaitbase, input, output);
         case 0x7:
-            return SetSubmitTimeout(input, output);
+            return WrapFixed(this, &nvhost_nvdec::SetSubmitTimeout, input, output);
         case 0x9:
-            return MapBuffer(input, output);
+            return WrapFixedVariable(this, &nvhost_nvdec::MapBuffer, input, output);
         case 0xa:
-            return UnmapBuffer(input, output);
+            return WrapFixedVariable(this, &nvhost_nvdec::UnmapBuffer, input, output);
         default:
             break;
         }
@@ -44,7 +45,7 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> in
     case 'H':
        switch (command.cmd) {
         case 0x1:
-            return SetNVMAPfd(input);
+            return WrapFixed(this, &nvhost_nvdec::SetNVMAPfd, input, output);
         default:
             break;
         }
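The checks added to SliceVectors and WriteVectors below make the offset arithmetic safe against short guest buffers: a slice that would run past the end now copies nothing instead of reading or writing out of bounds. A small self-contained sketch of the same guard (the helper name is illustrative):

    // Sketch of the added bounds check: copy `count` elements at `offset`, or nothing if short.
    template <typename T>
    std::size_t SliceChecked(std::span<const u8> input, std::vector<T>& dst,
                             std::size_t count, std::size_t offset) {
        if (count == 0) {
            return 0;
        }
        dst.resize(count);
        const std::size_t bytes_copied = count * sizeof(T);
        if (input.size() < offset + bytes_copied) {
            return 0; // short input: leave dst value-initialized, consume nothing
        }
        std::memcpy(dst.data(), input.data() + offset, bytes_copied);
        return bytes_copied;
    }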
@@ -29,6 +29,9 @@ std::size_t SliceVectors(std::span<const u8> input, std::vector<T>& dst, std::si
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
const size_t bytes_copied = count * sizeof(T);
|
const size_t bytes_copied = count * sizeof(T);
|
||||||
|
if (input.size() < offset + bytes_copied) {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
std::memcpy(dst.data(), input.data() + offset, bytes_copied);
|
std::memcpy(dst.data(), input.data() + offset, bytes_copied);
|
||||||
return bytes_copied;
|
return bytes_copied;
|
||||||
}
|
}
|
||||||
@@ -41,6 +44,9 @@ std::size_t WriteVectors(std::span<u8> dst, const std::vector<T>& src, std::size
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
const size_t bytes_copied = src.size() * sizeof(T);
|
const size_t bytes_copied = src.size() * sizeof(T);
|
||||||
|
if (dst.size() < offset + bytes_copied) {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
std::memcpy(dst.data() + offset, src.data(), bytes_copied);
|
std::memcpy(dst.data() + offset, src.data(), bytes_copied);
|
||||||
return bytes_copied;
|
return bytes_copied;
|
||||||
}
|
}
|
||||||
@@ -63,18 +69,14 @@ nvhost_nvdec_common::~nvhost_nvdec_common() {
     core.Host1xDeviceFile().syncpts_accumulated.push_back(channel_syncpoint);
 }

-NvResult nvhost_nvdec_common::SetNVMAPfd(std::span<const u8> input) {
-    IoctlSetNvmapFD params{};
-    std::memcpy(&params, input.data(), sizeof(IoctlSetNvmapFD));
+NvResult nvhost_nvdec_common::SetNVMAPfd(IoctlSetNvmapFD& params) {
     LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd);

     nvmap_fd = params.nvmap_fd;
     return NvResult::Success;
 }

-NvResult nvhost_nvdec_common::Submit(DeviceFD fd, std::span<const u8> input, std::span<u8> output) {
-    IoctlSubmit params{};
-    std::memcpy(&params, input.data(), sizeof(IoctlSubmit));
+NvResult nvhost_nvdec_common::Submit(IoctlSubmit& params, std::span<u8> data, DeviceFD fd) {
     LOG_DEBUG(Service_NVDRV, "called NVDEC Submit, cmd_buffer_count={}", params.cmd_buffer_count);

     // Instantiate param buffers
@@ -85,12 +87,12 @@ NvResult nvhost_nvdec_common::Submit(DeviceFD fd, std::span<const u8> input, std
     std::vector<u32> fence_thresholds(params.fence_count);

     // Slice input into their respective buffers
-    std::size_t offset = sizeof(IoctlSubmit);
-    offset += SliceVectors(input, command_buffers, params.cmd_buffer_count, offset);
-    offset += SliceVectors(input, relocs, params.relocation_count, offset);
-    offset += SliceVectors(input, reloc_shifts, params.relocation_count, offset);
-    offset += SliceVectors(input, syncpt_increments, params.syncpoint_count, offset);
-    offset += SliceVectors(input, fence_thresholds, params.fence_count, offset);
+    std::size_t offset = 0;
+    offset += SliceVectors(data, command_buffers, params.cmd_buffer_count, offset);
+    offset += SliceVectors(data, relocs, params.relocation_count, offset);
+    offset += SliceVectors(data, reloc_shifts, params.relocation_count, offset);
+    offset += SliceVectors(data, syncpt_increments, params.syncpoint_count, offset);
+    offset += SliceVectors(data, fence_thresholds, params.fence_count, offset);

     auto& gpu = system.GPU();
     if (gpu.UseNvdec()) {
@@ -108,72 +110,51 @@ NvResult nvhost_nvdec_common::Submit(DeviceFD fd, std::span<const u8> input, std
                             cmdlist.size() * sizeof(u32));
         gpu.PushCommandBuffer(core.Host1xDeviceFile().fd_to_id[fd], cmdlist);
     }
-    std::memcpy(output.data(), &params, sizeof(IoctlSubmit));
     // Some games expect command_buffers to be written back
-    offset = sizeof(IoctlSubmit);
-    offset += WriteVectors(output, command_buffers, offset);
-    offset += WriteVectors(output, relocs, offset);
-    offset += WriteVectors(output, reloc_shifts, offset);
-    offset += WriteVectors(output, syncpt_increments, offset);
-    offset += WriteVectors(output, fence_thresholds, offset);
+    offset = 0;
+    offset += WriteVectors(data, command_buffers, offset);
+    offset += WriteVectors(data, relocs, offset);
+    offset += WriteVectors(data, reloc_shifts, offset);
+    offset += WriteVectors(data, syncpt_increments, offset);
+    offset += WriteVectors(data, fence_thresholds, offset);

     return NvResult::Success;
 }

-NvResult nvhost_nvdec_common::GetSyncpoint(std::span<const u8> input, std::span<u8> output) {
-    IoctlGetSyncpoint params{};
-    std::memcpy(&params, input.data(), sizeof(IoctlGetSyncpoint));
+NvResult nvhost_nvdec_common::GetSyncpoint(IoctlGetSyncpoint& params) {
     LOG_DEBUG(Service_NVDRV, "called GetSyncpoint, id={}", params.param);

-    // const u32 id{NvCore::SyncpointManager::channel_syncpoints[static_cast<u32>(channel_type)]};
     params.value = channel_syncpoint;
-    std::memcpy(output.data(), &params, sizeof(IoctlGetSyncpoint));

     return NvResult::Success;
 }

-NvResult nvhost_nvdec_common::GetWaitbase(std::span<const u8> input, std::span<u8> output) {
-    IoctlGetWaitbase params{};
+NvResult nvhost_nvdec_common::GetWaitbase(IoctlGetWaitbase& params) {
     LOG_CRITICAL(Service_NVDRV, "called WAITBASE");
-    std::memcpy(&params, input.data(), sizeof(IoctlGetWaitbase));
     params.value = 0; // Seems to be hard coded at 0
-    std::memcpy(output.data(), &params, sizeof(IoctlGetWaitbase));
     return NvResult::Success;
 }

-NvResult nvhost_nvdec_common::MapBuffer(std::span<const u8> input, std::span<u8> output) {
-    IoctlMapBuffer params{};
-    std::memcpy(&params, input.data(), sizeof(IoctlMapBuffer));
-    std::vector<MapBufferEntry> cmd_buffer_handles(params.num_entries);
-
-    SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
-
-    for (auto& cmd_buffer : cmd_buffer_handles) {
-        cmd_buffer.map_address = nvmap.PinHandle(cmd_buffer.map_handle);
-    }
-    std::memcpy(output.data(), &params, sizeof(IoctlMapBuffer));
-    std::memcpy(output.data() + sizeof(IoctlMapBuffer), cmd_buffer_handles.data(),
-                cmd_buffer_handles.size() * sizeof(MapBufferEntry));
-
-    return NvResult::Success;
-}
-
-NvResult nvhost_nvdec_common::UnmapBuffer(std::span<const u8> input, std::span<u8> output) {
-    IoctlMapBuffer params{};
-    std::memcpy(&params, input.data(), sizeof(IoctlMapBuffer));
-    std::vector<MapBufferEntry> cmd_buffer_handles(params.num_entries);
-
-    SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
-    for (auto& cmd_buffer : cmd_buffer_handles) {
-        nvmap.UnpinHandle(cmd_buffer.map_handle);
+NvResult nvhost_nvdec_common::MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries) {
+    const size_t num_entries = std::min(params.num_entries, static_cast<u32>(entries.size()));
+    for (size_t i = 0; i < num_entries; i++) {
+        entries[i].map_address = nvmap.PinHandle(entries[i].map_handle);
     }

-    std::memset(output.data(), 0, output.size());
     return NvResult::Success;
 }

-NvResult nvhost_nvdec_common::SetSubmitTimeout(std::span<const u8> input, std::span<u8> output) {
-    std::memcpy(&submit_timeout, input.data(), input.size());
+NvResult nvhost_nvdec_common::UnmapBuffer(IoctlMapBuffer& params,
+                                          std::span<MapBufferEntry> entries) {
+    const size_t num_entries = std::min(params.num_entries, static_cast<u32>(entries.size()));
+    for (size_t i = 0; i < num_entries; i++) {
+        nvmap.UnpinHandle(entries[i].map_handle);
+        entries[i] = {};
+    }
+
+    params = {};
+    return NvResult::Success;
+}
+
+NvResult nvhost_nvdec_common::SetSubmitTimeout(u32 timeout) {
     LOG_WARNING(Service_NVDRV, "(STUBBED) called");
     return NvResult::Success;
 }
@@ -107,13 +107,13 @@ protected:
     static_assert(sizeof(IoctlMapBuffer) == 0x0C, "IoctlMapBuffer is incorrect size");

     /// Ioctl command implementations
-    NvResult SetNVMAPfd(std::span<const u8> input);
-    NvResult Submit(DeviceFD fd, std::span<const u8> input, std::span<u8> output);
-    NvResult GetSyncpoint(std::span<const u8> input, std::span<u8> output);
-    NvResult GetWaitbase(std::span<const u8> input, std::span<u8> output);
-    NvResult MapBuffer(std::span<const u8> input, std::span<u8> output);
-    NvResult UnmapBuffer(std::span<const u8> input, std::span<u8> output);
-    NvResult SetSubmitTimeout(std::span<const u8> input, std::span<u8> output);
+    NvResult SetNVMAPfd(IoctlSetNvmapFD&);
+    NvResult Submit(IoctlSubmit& params, std::span<u8> input, DeviceFD fd);
+    NvResult GetSyncpoint(IoctlGetSyncpoint& params);
+    NvResult GetWaitbase(IoctlGetWaitbase& params);
+    NvResult MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries);
+    NvResult UnmapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries);
+    NvResult SetSubmitTimeout(u32 timeout);

     Kernel::KEvent* QueryEvent(u32 event_id) override;

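Every dispatch above now goes through `WrapFixed`/`WrapFixedVariable` from the newly added `core/hle/service/nvdrv/devices/ioctl_serialization.h`, whose contents are not part of this compare. Judging only from the call sites (a fixed-size params struct taken by reference, an optional trailing data span for `Submit`/`MapBuffer`, and extra arguments such as the `DeviceFD` forwarded last), a fixed-size wrapper could plausibly look like the sketch below; treat it as an illustration of the pattern, not the real header.

```cpp
// Hypothetical sketch of the fixed-size ioctl wrapper, inferred from the call
// sites above. NvResult, u8 and DeviceFD are the existing yuzu types; the real
// ioctl_serialization.h may differ in shape and in error handling.
template <typename Self, typename Obj, typename Params, typename... Extra>
NvResult WrapFixed(Obj* object, NvResult (Self::*handler)(Params&, Extra...),
                   std::span<const u8> input, std::span<u8> output, Extra... extra) {
    Params params{};
    // Deserialize the fixed-size request struct from the raw ioctl input.
    std::memcpy(&params, input.data(), std::min(input.size(), sizeof(Params)));

    // Call the strongly typed handler; it mutates params in place.
    const NvResult result = (object->*handler)(params, extra...);

    // Serialize the (possibly updated) struct back into the output buffer.
    std::memcpy(output.data(), &params, std::min(output.size(), sizeof(Params)));
    return result;
}
```

A `WrapFixedVariable`-style helper would presumably do the same but also split the bytes following the fixed-size header into the trailing span (the `data` parameter of `Submit`, or the `MapBufferEntry` span of `MapBuffer`/`UnmapBuffer`) before invoking the handler.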
@@ -5,6 +5,7 @@

 #include "common/assert.h"
 #include "common/logging/log.h"
+#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
 #include "core/hle/service/nvdrv/devices/nvhost_nvjpg.h"

 namespace Service::Nvidia::Devices {
@@ -18,7 +19,7 @@ NvResult nvhost_nvjpg::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> in
     case 'H':
         switch (command.cmd) {
         case 0x1:
-            return SetNVMAPfd(input, output);
+            return WrapFixed(this, &nvhost_nvjpg::SetNVMAPfd, input, output);
         default:
             break;
         }
@@ -46,9 +47,7 @@ NvResult nvhost_nvjpg::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> in
 void nvhost_nvjpg::OnOpen(DeviceFD fd) {}
 void nvhost_nvjpg::OnClose(DeviceFD fd) {}

-NvResult nvhost_nvjpg::SetNVMAPfd(std::span<const u8> input, std::span<u8> output) {
-    IoctlSetNvmapFD params{};
-    std::memcpy(&params, input.data(), input.size());
+NvResult nvhost_nvjpg::SetNVMAPfd(IoctlSetNvmapFD& params) {
     LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd);

     nvmap_fd = params.nvmap_fd;
@@ -33,7 +33,7 @@ private:

     s32_le nvmap_fd{};

-    NvResult SetNVMAPfd(std::span<const u8> input, std::span<u8> output);
+    NvResult SetNVMAPfd(IoctlSetNvmapFD& params);
 };

 } // namespace Service::Nvidia::Devices
@@ -5,6 +5,7 @@
 #include "common/logging/log.h"
 #include "core/core.h"
 #include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
 #include "core/hle/service/nvdrv/devices/nvhost_vic.h"
 #include "video_core/renderer_base.h"

@@ -25,16 +26,16 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu
             if (!host1x_file.fd_to_id.contains(fd)) {
                 host1x_file.fd_to_id[fd] = host1x_file.vic_next_id++;
             }
-            return Submit(fd, input, output);
+            return WrapFixedVariable(this, &nvhost_vic::Submit, input, output, fd);
         }
         case 0x2:
-            return GetSyncpoint(input, output);
+            return WrapFixed(this, &nvhost_vic::GetSyncpoint, input, output);
         case 0x3:
-            return GetWaitbase(input, output);
+            return WrapFixed(this, &nvhost_vic::GetWaitbase, input, output);
         case 0x9:
-            return MapBuffer(input, output);
+            return WrapFixedVariable(this, &nvhost_vic::MapBuffer, input, output);
         case 0xa:
-            return UnmapBuffer(input, output);
+            return WrapFixedVariable(this, &nvhost_vic::UnmapBuffer, input, output);
         default:
             break;
         }
@@ -42,7 +43,7 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu
     case 'H':
         switch (command.cmd) {
         case 0x1:
-            return SetNVMAPfd(input);
+            return WrapFixed(this, &nvhost_vic::SetNVMAPfd, input, output);
         default:
             break;
         }
@@ -13,6 +13,7 @@
 #include "core/hle/kernel/k_process.h"
 #include "core/hle/service/nvdrv/core/container.h"
 #include "core/hle/service/nvdrv/core/nvmap.h"
+#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
 #include "core/hle/service/nvdrv/devices/nvmap.h"
 #include "core/memory.h"

@@ -31,17 +32,17 @@ NvResult nvmap::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
    case 0x1:
        switch (command.cmd) {
        case 0x1:
-            return IocCreate(input, output);
+            return WrapFixed(this, &nvmap::IocCreate, input, output);
        case 0x3:
-            return IocFromId(input, output);
+            return WrapFixed(this, &nvmap::IocFromId, input, output);
        case 0x4:
-            return IocAlloc(input, output);
+            return WrapFixed(this, &nvmap::IocAlloc, input, output);
        case 0x5:
-            return IocFree(input, output);
+            return WrapFixed(this, &nvmap::IocFree, input, output);
        case 0x9:
-            return IocParam(input, output);
+            return WrapFixed(this, &nvmap::IocParam, input, output);
        case 0xe:
-            return IocGetId(input, output);
+            return WrapFixed(this, &nvmap::IocGetId, input, output);
        default:
            break;
        }
@@ -69,9 +70,7 @@ NvResult nvmap::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, st
 void nvmap::OnOpen(DeviceFD fd) {}
 void nvmap::OnClose(DeviceFD fd) {}

-NvResult nvmap::IocCreate(std::span<const u8> input, std::span<u8> output) {
-    IocCreateParams params;
-    std::memcpy(&params, input.data(), sizeof(params));
+NvResult nvmap::IocCreate(IocCreateParams& params) {
     LOG_DEBUG(Service_NVDRV, "called, size=0x{:08X}", params.size);

     std::shared_ptr<NvCore::NvMap::Handle> handle_description{};
@@ -85,13 +84,10 @@ NvResult nvmap::IocCreate(std::span<const u8> input, std::span<u8> output) {
     params.handle = handle_description->id;
     LOG_DEBUG(Service_NVDRV, "handle: {}, size: 0x{:X}", handle_description->id, params.size);

-    std::memcpy(output.data(), &params, sizeof(params));
     return NvResult::Success;
 }

-NvResult nvmap::IocAlloc(std::span<const u8> input, std::span<u8> output) {
-    IocAllocParams params;
-    std::memcpy(&params, input.data(), sizeof(params));
+NvResult nvmap::IocAlloc(IocAllocParams& params) {
     LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.address);

     if (!params.handle) {
@@ -133,14 +129,10 @@ NvResult nvmap::IocAlloc(std::span<const u8> input, std::span<u8> output) {
                                              handle_description->size,
                                              Kernel::KMemoryPermission::None, true, false)
                    .IsSuccess());
-    std::memcpy(output.data(), &params, sizeof(params));
     return result;
 }

-NvResult nvmap::IocGetId(std::span<const u8> input, std::span<u8> output) {
-    IocGetIdParams params;
-    std::memcpy(&params, input.data(), sizeof(params));
-
+NvResult nvmap::IocGetId(IocGetIdParams& params) {
     LOG_DEBUG(Service_NVDRV, "called");

     // See the comment in FromId for extra info on this function
@@ -157,14 +149,10 @@ NvResult nvmap::IocGetId(std::span<const u8> input, std::span<u8> output) {
     }

     params.id = handle_description->id;
-    std::memcpy(output.data(), &params, sizeof(params));
     return NvResult::Success;
 }

-NvResult nvmap::IocFromId(std::span<const u8> input, std::span<u8> output) {
-    IocFromIdParams params;
-    std::memcpy(&params, input.data(), sizeof(params));
-
+NvResult nvmap::IocFromId(IocFromIdParams& params) {
     LOG_DEBUG(Service_NVDRV, "called, id:{}", params.id);

     // Handles and IDs are always the same value in nvmap however IDs can be used globally given the
@@ -188,16 +176,12 @@ NvResult nvmap::IocFromId(std::span<const u8> input, std::span<u8> output) {
         return result;
     }
     params.handle = handle_description->id;
-    std::memcpy(output.data(), &params, sizeof(params));
     return NvResult::Success;
 }

-NvResult nvmap::IocParam(std::span<const u8> input, std::span<u8> output) {
+NvResult nvmap::IocParam(IocParamParams& params) {
     enum class ParamTypes { Size = 1, Alignment = 2, Base = 3, Heap = 4, Kind = 5, Compr = 6 };

-    IocParamParams params;
-    std::memcpy(&params, input.data(), sizeof(params));
-
     LOG_DEBUG(Service_NVDRV, "called type={}", params.param);

     if (!params.handle) {
@@ -237,14 +221,10 @@ NvResult nvmap::IocParam(std::span<const u8> input, std::span<u8> output) {
         return NvResult::BadValue;
     }

-    std::memcpy(output.data(), &params, sizeof(params));
     return NvResult::Success;
 }

-NvResult nvmap::IocFree(std::span<const u8> input, std::span<u8> output) {
-    IocFreeParams params;
-    std::memcpy(&params, input.data(), sizeof(params));
-
+NvResult nvmap::IocFree(IocFreeParams& params) {
     LOG_DEBUG(Service_NVDRV, "called");

     if (!params.handle) {
@@ -267,7 +247,6 @@ NvResult nvmap::IocFree(std::span<const u8> input, std::span<u8> output) {
         // This is possible when there's internal dups or other duplicates.
     }

-    std::memcpy(output.data(), &params, sizeof(params));
     return NvResult::Success;
 }

@@ -99,12 +99,12 @@ public:
     };
     static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size");

-    NvResult IocCreate(std::span<const u8> input, std::span<u8> output);
-    NvResult IocAlloc(std::span<const u8> input, std::span<u8> output);
-    NvResult IocGetId(std::span<const u8> input, std::span<u8> output);
-    NvResult IocFromId(std::span<const u8> input, std::span<u8> output);
-    NvResult IocParam(std::span<const u8> input, std::span<u8> output);
-    NvResult IocFree(std::span<const u8> input, std::span<u8> output);
+    NvResult IocCreate(IocCreateParams& params);
+    NvResult IocAlloc(IocAllocParams& params);
+    NvResult IocGetId(IocGetIdParams& params);
+    NvResult IocFromId(IocFromIdParams& params);
+    NvResult IocParam(IocParamParams& params);
+    NvResult IocFree(IocFreeParams& params);

 private:
     /// Id to use for the next handle that is created.
@@ -3,6 +3,7 @@

 #pragma once

+#include "common/common_funcs.h"
 #include "common/common_types.h"

 namespace Service::android {
@@ -21,5 +22,6 @@ enum class BufferTransformFlags : u32 {
     /// Rotate source image 270 degrees clockwise
     Rotate270 = 0x07,
 };
+DECLARE_ENUM_FLAG_OPERATORS(BufferTransformFlags);

 } // namespace Service::android
@@ -71,24 +71,17 @@ Result AllocateIoForProcessAddressSpace(Common::ProcessAddress* out_map_address,
     R_SUCCEED();
 }

-template <typename T>
-std::span<u8> SerializeIoc(T& params) {
-    return std::span(reinterpret_cast<u8*>(std::addressof(params)), sizeof(T));
-}
-
 Result CreateNvMapHandle(u32* out_nv_map_handle, Nvidia::Devices::nvmap& nvmap, u32 size) {
     // Create a handle.
-    Nvidia::Devices::nvmap::IocCreateParams create_in_params{
+    Nvidia::Devices::nvmap::IocCreateParams create_params{
         .size = size,
         .handle = 0,
     };
-    Nvidia::Devices::nvmap::IocCreateParams create_out_params{};
-    R_UNLESS(nvmap.IocCreate(SerializeIoc(create_in_params), SerializeIoc(create_out_params)) ==
-                 Nvidia::NvResult::Success,
+    R_UNLESS(nvmap.IocCreate(create_params) == Nvidia::NvResult::Success,
             VI::ResultOperationFailed);

     // Assign the output handle.
-    *out_nv_map_handle = create_out_params.handle;
+    *out_nv_map_handle = create_params.handle;

     // We succeeded.
     R_SUCCEED();
@@ -96,13 +89,10 @@ Result CreateNvMapHandle(u32* out_nv_map_handle, Nvidia::Devices::nvmap& nvmap,

 Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle) {
     // Free the handle.
-    Nvidia::Devices::nvmap::IocFreeParams free_in_params{
+    Nvidia::Devices::nvmap::IocFreeParams free_params{
         .handle = handle,
     };
-    Nvidia::Devices::nvmap::IocFreeParams free_out_params{};
-    R_UNLESS(nvmap.IocFree(SerializeIoc(free_in_params), SerializeIoc(free_out_params)) ==
-                 Nvidia::NvResult::Success,
-             VI::ResultOperationFailed);
+    R_UNLESS(nvmap.IocFree(free_params) == Nvidia::NvResult::Success, VI::ResultOperationFailed);

     // We succeeded.
     R_SUCCEED();
@@ -111,7 +101,7 @@ Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle) {
 Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::ProcessAddress buffer,
                         u32 size) {
     // Assign the allocated memory to the handle.
-    Nvidia::Devices::nvmap::IocAllocParams alloc_in_params{
+    Nvidia::Devices::nvmap::IocAllocParams alloc_params{
         .handle = handle,
         .heap_mask = 0,
         .flags = {},
@@ -119,10 +109,7 @@ Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::Proce
         .kind = 0,
         .address = GetInteger(buffer),
     };
-    Nvidia::Devices::nvmap::IocAllocParams alloc_out_params{};
-    R_UNLESS(nvmap.IocAlloc(SerializeIoc(alloc_in_params), SerializeIoc(alloc_out_params)) ==
-                 Nvidia::NvResult::Success,
-             VI::ResultOperationFailed);
+    R_UNLESS(nvmap.IocAlloc(alloc_params) == Nvidia::NvResult::Success, VI::ResultOperationFailed);

     // We succeeded.
     R_SUCCEED();
@@ -39,6 +39,18 @@ bool IsConnectionBased(Type type) {
     }
 }

+template <typename T>
+T GetValue(std::span<const u8> buffer) {
+    T t{};
+    std::memcpy(&t, buffer.data(), std::min(sizeof(T), buffer.size()));
+    return t;
+}
+
+template <typename T>
+void PutValue(std::span<u8> buffer, const T& t) {
+    std::memcpy(buffer.data(), &t, std::min(sizeof(T), buffer.size()));
+}
+
 } // Anonymous namespace

 void BSD::PollWork::Execute(BSD* bsd) {
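The anonymous-namespace helpers added above replace the open-coded `std::memcpy` pattern used at the call sites further down (`BindImpl`, `ConnectImpl`, `SendToImpl`, and the name/sockopt writers). Clamping the copy with `std::min` means a guest buffer shorter than the wire struct is zero-filled or partially written rather than overread or overwritten. A usage sketch with a made-up trivially copyable struct (illustrative only; `WireExample` is not a real yuzu type):

```cpp
// Illustrative only; WireExample stands in for structs such as SockAddrIn.
struct WireExample {
    u32 a;
    u32 b;
};

void HandleRequest(std::span<const u8> request, std::span<u8> response) {
    // Old pattern: WireExample tmp; std::memcpy(&tmp, request.data(), sizeof(tmp));
    const auto parsed = GetValue<WireExample>(request); // safe even if request is short
    // ... operate on parsed ...
    PutValue(response, parsed); // writes at most response.size() bytes
}
```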
@@ -316,22 +328,12 @@ void BSD::SetSockOpt(HLERequestContext& ctx) {
     const s32 fd = rp.Pop<s32>();
     const u32 level = rp.Pop<u32>();
     const OptName optname = static_cast<OptName>(rp.Pop<u32>());
-    const auto buffer = ctx.ReadBuffer();
-    const u8* optval = buffer.empty() ? nullptr : buffer.data();
-    size_t optlen = buffer.size();
-
-    std::array<u64, 2> values;
-    if ((optname == OptName::SNDTIMEO || optname == OptName::RCVTIMEO) && buffer.size() == 8) {
-        std::memcpy(values.data(), buffer.data(), sizeof(values));
-        optlen = sizeof(values);
-        optval = reinterpret_cast<const u8*>(values.data());
-    }
+    const auto optval = ctx.ReadBuffer();

     LOG_DEBUG(Service, "called. fd={} level={} optname=0x{:x} optlen={}", fd, level,
-              static_cast<u32>(optname), optlen);
+              static_cast<u32>(optname), optval.size());

-    BuildErrnoResponse(ctx, SetSockOptImpl(fd, level, optname, optlen, optval));
+    BuildErrnoResponse(ctx, SetSockOptImpl(fd, level, optname, optval));
 }

 void BSD::Shutdown(HLERequestContext& ctx) {
@@ -521,18 +523,19 @@ std::pair<s32, Errno> BSD::SocketImpl(Domain domain, Type type, Protocol protoco

 std::pair<s32, Errno> BSD::PollImpl(std::vector<u8>& write_buffer, std::span<const u8> read_buffer,
                                     s32 nfds, s32 timeout) {
+    if (nfds <= 0) {
+        // When no entries are provided, -1 is returned with errno zero
+        return {-1, Errno::SUCCESS};
+    }
+    if (read_buffer.size() < nfds * sizeof(PollFD)) {
+        return {-1, Errno::INVAL};
+    }
     if (write_buffer.size() < nfds * sizeof(PollFD)) {
         return {-1, Errno::INVAL};
     }

-    if (nfds == 0) {
-        // When no entries are provided, -1 is returned with errno zero
-        return {-1, Errno::SUCCESS};
-    }
-
-    const size_t length = std::min(read_buffer.size(), write_buffer.size());
     std::vector<PollFD> fds(nfds);
-    std::memcpy(fds.data(), read_buffer.data(), length);
+    std::memcpy(fds.data(), read_buffer.data(), nfds * sizeof(PollFD));

     if (timeout >= 0) {
         const s64 seconds = timeout / 1000;
@@ -580,7 +583,7 @@ std::pair<s32, Errno> BSD::PollImpl(std::vector<u8>& write_buffer, std::span<con
     for (size_t i = 0; i < num; ++i) {
         fds[i].revents = Translate(host_pollfds[i].revents);
     }
-    std::memcpy(write_buffer.data(), fds.data(), length);
+    std::memcpy(write_buffer.data(), fds.data(), nfds * sizeof(PollFD));

     return Translate(result);
 }
@@ -608,8 +611,7 @@ std::pair<s32, Errno> BSD::AcceptImpl(s32 fd, std::vector<u8>& write_buffer) {
     new_descriptor.is_connection_based = descriptor.is_connection_based;

     const SockAddrIn guest_addr_in = Translate(result.sockaddr_in);
-    const size_t length = std::min(sizeof(guest_addr_in), write_buffer.size());
-    std::memcpy(write_buffer.data(), &guest_addr_in, length);
+    PutValue(write_buffer, guest_addr_in);

     return {new_fd, Errno::SUCCESS};
 }
@@ -619,8 +621,7 @@ Errno BSD::BindImpl(s32 fd, std::span<const u8> addr) {
         return Errno::BADF;
     }
     ASSERT(addr.size() == sizeof(SockAddrIn));
-    SockAddrIn addr_in;
-    std::memcpy(&addr_in, addr.data(), sizeof(addr_in));
+    auto addr_in = GetValue<SockAddrIn>(addr);

     return Translate(file_descriptors[fd]->socket->Bind(Translate(addr_in)));
 }
@@ -631,8 +632,7 @@ Errno BSD::ConnectImpl(s32 fd, std::span<const u8> addr) {
     }

     UNIMPLEMENTED_IF(addr.size() != sizeof(SockAddrIn));
-    SockAddrIn addr_in;
-    std::memcpy(&addr_in, addr.data(), sizeof(addr_in));
+    auto addr_in = GetValue<SockAddrIn>(addr);

     return Translate(file_descriptors[fd]->socket->Connect(Translate(addr_in)));
 }
@@ -650,7 +650,7 @@ Errno BSD::GetPeerNameImpl(s32 fd, std::vector<u8>& write_buffer) {

     ASSERT(write_buffer.size() >= sizeof(guest_addrin));
     write_buffer.resize(sizeof(guest_addrin));
-    std::memcpy(write_buffer.data(), &guest_addrin, sizeof(guest_addrin));
+    PutValue(write_buffer, guest_addrin);
     return Translate(bsd_errno);
 }

@@ -667,7 +667,7 @@ Errno BSD::GetSockNameImpl(s32 fd, std::vector<u8>& write_buffer) {

     ASSERT(write_buffer.size() >= sizeof(guest_addrin));
     write_buffer.resize(sizeof(guest_addrin));
-    std::memcpy(write_buffer.data(), &guest_addrin, sizeof(guest_addrin));
+    PutValue(write_buffer, guest_addrin);
     return Translate(bsd_errno);
 }

@@ -725,7 +725,7 @@ Errno BSD::GetSockOptImpl(s32 fd, u32 level, OptName optname, std::vector<u8>& o
            optval.size() == sizeof(Errno), { return Errno::INVAL; },
            "Incorrect getsockopt option size");
        optval.resize(sizeof(Errno));
-        memcpy(optval.data(), &translated_pending_err, sizeof(Errno));
+        PutValue(optval, translated_pending_err);
     }
     return Translate(getsockopt_err);
 }
@@ -735,7 +735,7 @@ Errno BSD::GetSockOptImpl(s32 fd, u32 level, OptName optname, std::vector<u8>& o
     }
 }

-Errno BSD::SetSockOptImpl(s32 fd, u32 level, OptName optname, size_t optlen, const void* optval) {
+Errno BSD::SetSockOptImpl(s32 fd, u32 level, OptName optname, std::span<const u8> optval) {
     if (!IsFileDescriptorValid(fd)) {
         return Errno::BADF;
     }
@@ -748,17 +748,15 @@ Errno BSD::SetSockOptImpl(s32 fd, u32 level, OptName optname, size_t optlen, con
     Network::SocketBase* const socket = file_descriptors[fd]->socket.get();

     if (optname == OptName::LINGER) {
-        ASSERT(optlen == sizeof(Linger));
-        Linger linger;
-        std::memcpy(&linger, optval, sizeof(linger));
+        ASSERT(optval.size() == sizeof(Linger));
+        auto linger = GetValue<Linger>(optval);
         ASSERT(linger.onoff == 0 || linger.onoff == 1);

         return Translate(socket->SetLinger(linger.onoff != 0, linger.linger));
     }

-    ASSERT(optlen == sizeof(u32));
-    u32 value;
-    std::memcpy(&value, optval, sizeof(value));
+    ASSERT(optval.size() == sizeof(u32));
+    auto value = GetValue<u32>(optval);

     switch (optname) {
     case OptName::REUSEADDR:
@@ -862,7 +860,7 @@ std::pair<s32, Errno> BSD::RecvFromImpl(s32 fd, u32 flags, std::vector<u8>& mess
         } else {
             ASSERT(addr.size() == sizeof(SockAddrIn));
             const SockAddrIn result = Translate(addr_in);
-            std::memcpy(addr.data(), &result, sizeof(result));
+            PutValue(addr, result);
         }
     }

@@ -886,8 +884,7 @@ std::pair<s32, Errno> BSD::SendToImpl(s32 fd, u32 flags, std::span<const u8> mes
     Network::SockAddrIn* p_addr_in = nullptr;
     if (!addr.empty()) {
         ASSERT(addr.size() == sizeof(SockAddrIn));
-        SockAddrIn guest_addr_in;
-        std::memcpy(&guest_addr_in, addr.data(), sizeof(guest_addr_in));
+        auto guest_addr_in = GetValue<SockAddrIn>(addr);

         addr_in = Translate(guest_addr_in);
         p_addr_in = &addr_in;
     }
@@ -163,7 +163,7 @@ private:
     Errno ListenImpl(s32 fd, s32 backlog);
     std::pair<s32, Errno> FcntlImpl(s32 fd, FcntlCmd cmd, s32 arg);
     Errno GetSockOptImpl(s32 fd, u32 level, OptName optname, std::vector<u8>& optval);
-    Errno SetSockOptImpl(s32 fd, u32 level, OptName optname, size_t optlen, const void* optval);
+    Errno SetSockOptImpl(s32 fd, u32 level, OptName optname, std::span<const u8> optval);
     Errno ShutdownImpl(s32 fd, s32 how);
     std::pair<s32, Errno> RecvImpl(s32 fd, u32 flags, std::vector<u8>& message);
     std::pair<s32, Errno> RecvFromImpl(s32 fd, u32 flags, std::vector<u8>& message,
@@ -41,7 +41,7 @@ struct Memory::Impl {
     explicit Impl(Core::System& system_) : system{system_} {}

     void SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) {
-        current_page_table = &process.GetPageTable().PageTableImpl();
+        current_page_table = &process.GetPageTable().GetImpl();
         current_page_table->fastmem_arena = system.DeviceMemory().buffer.VirtualBasePointer();

         const std::size_t address_space_width = process.GetPageTable().GetAddressSpaceWidth();
@@ -195,7 +195,7 @@ struct Memory::Impl {

     bool WalkBlock(const Common::ProcessAddress addr, const std::size_t size, auto on_unmapped,
                    auto on_memory, auto on_rasterizer, auto increment) {
-        const auto& page_table = system.ApplicationProcess()->GetPageTable().PageTableImpl();
+        const auto& page_table = system.ApplicationProcess()->GetPageTable().GetImpl();
         std::size_t remaining_size = size;
         std::size_t page_index = addr >> YUZU_PAGEBITS;
         std::size_t page_offset = addr & YUZU_PAGEMASK;
@@ -826,7 +826,7 @@ void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress b

 bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const {
     const Kernel::KProcess& process = *system.ApplicationProcess();
-    const auto& page_table = process.GetPageTable().PageTableImpl();
+    const auto& page_table = process.GetPageTable().GetImpl();
     const size_t page = vaddr >> YUZU_PAGEBITS;
     if (page >= page_table.pointers.size()) {
         return false;
@@ -3,6 +3,7 @@

 #include "common/alignment.h"
 #include "core/memory.h"
+#include "video_core/control/channel_state.h"
 #include "video_core/host1x/host1x.h"
 #include "video_core/memory_manager.h"
 #include "video_core/renderer_null/null_rasterizer.h"
@@ -99,8 +100,14 @@ bool RasterizerNull::AccelerateDisplay(const Tegra::FramebufferConfig& config,
 }
 void RasterizerNull::LoadDiskResources(u64 title_id, std::stop_token stop_loading,
                                        const VideoCore::DiskResourceLoadCallback& callback) {}
-void RasterizerNull::InitializeChannel(Tegra::Control::ChannelState& channel) {}
-void RasterizerNull::BindChannel(Tegra::Control::ChannelState& channel) {}
-void RasterizerNull::ReleaseChannel(s32 channel_id) {}
+void RasterizerNull::InitializeChannel(Tegra::Control::ChannelState& channel) {
+    CreateChannel(channel);
+}
+void RasterizerNull::BindChannel(Tegra::Control::ChannelState& channel) {
+    BindToChannel(channel.bind_id);
+}
+void RasterizerNull::ReleaseChannel(s32 channel_id) {
+    EraseChannel(channel_id);
+}

 } // namespace Null
@@ -137,6 +137,56 @@ BlitScreen::BlitScreen(Core::Memory::Memory& cpu_memory_, Core::Frontend::EmuWin

 BlitScreen::~BlitScreen() = default;

+static Common::Rectangle<f32> NormalizeCrop(const Tegra::FramebufferConfig& framebuffer,
+                                            const ScreenInfo& screen_info) {
+    f32 left, top, right, bottom;
+
+    if (!framebuffer.crop_rect.IsEmpty()) {
+        // If crop rectangle is not empty, apply properties from rectangle.
+        left = static_cast<f32>(framebuffer.crop_rect.left);
+        top = static_cast<f32>(framebuffer.crop_rect.top);
+        right = static_cast<f32>(framebuffer.crop_rect.right);
+        bottom = static_cast<f32>(framebuffer.crop_rect.bottom);
+    } else {
+        // Otherwise, fall back to framebuffer dimensions.
+        left = 0;
+        top = 0;
+        right = static_cast<f32>(framebuffer.width);
+        bottom = static_cast<f32>(framebuffer.height);
+    }
+
+    // Apply transformation flags.
+    auto framebuffer_transform_flags = framebuffer.transform_flags;
+
+    if (True(framebuffer_transform_flags & Service::android::BufferTransformFlags::FlipH)) {
+        // Switch left and right.
+        std::swap(left, right);
+    }
+    if (True(framebuffer_transform_flags & Service::android::BufferTransformFlags::FlipV)) {
+        // Switch top and bottom.
+        std::swap(top, bottom);
+    }
+
+    framebuffer_transform_flags &= ~Service::android::BufferTransformFlags::FlipH;
+    framebuffer_transform_flags &= ~Service::android::BufferTransformFlags::FlipV;
+    if (True(framebuffer_transform_flags)) {
+        UNIMPLEMENTED_MSG("Unsupported framebuffer_transform_flags={}",
+                          static_cast<u32>(framebuffer_transform_flags));
+    }
+
+    // Get the screen properties.
+    const f32 screen_width = static_cast<f32>(screen_info.width);
+    const f32 screen_height = static_cast<f32>(screen_info.height);
+
+    // Normalize coordinate space.
+    left /= screen_width;
+    top /= screen_height;
+    right /= screen_width;
+    bottom /= screen_height;
+
+    return Common::Rectangle<f32>(left, top, right, bottom);
+}
+
 void BlitScreen::Recreate() {
     present_manager.WaitPresent();
     scheduler.Finish();
@@ -354,17 +404,10 @@ void BlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer,
         source_image_view = smaa->Draw(scheduler, image_index, source_image, source_image_view);
     }
     if (fsr) {
-        auto crop_rect = framebuffer.crop_rect;
-        if (crop_rect.GetWidth() == 0) {
-            crop_rect.right = framebuffer.width;
-        }
-        if (crop_rect.GetHeight() == 0) {
-            crop_rect.bottom = framebuffer.height;
-        }
-        crop_rect = crop_rect.Scale(Settings::values.resolution_info.up_factor);
-        VkExtent2D fsr_input_size{
-            .width = Settings::values.resolution_info.ScaleUp(framebuffer.width),
-            .height = Settings::values.resolution_info.ScaleUp(framebuffer.height),
+        const auto crop_rect = NormalizeCrop(framebuffer, screen_info);
+        const VkExtent2D fsr_input_size{
+            .width = Settings::values.resolution_info.ScaleUp(screen_info.width),
+            .height = Settings::values.resolution_info.ScaleUp(screen_info.height),
         };
         VkImageView fsr_image_view =
             fsr->Draw(scheduler, image_index, source_image_view, fsr_input_size, crop_rect);
@@ -1397,61 +1440,37 @@ void BlitScreen::SetUniformData(BufferData& data, const Layout::FramebufferLayou

 void BlitScreen::SetVertexData(BufferData& data, const Tegra::FramebufferConfig& framebuffer,
                                const Layout::FramebufferLayout layout) const {
-    const auto& framebuffer_transform_flags = framebuffer.transform_flags;
-    const auto& framebuffer_crop_rect = framebuffer.crop_rect;
+    f32 left, top, right, bottom;

-    static constexpr Common::Rectangle<f32> texcoords{0.f, 0.f, 1.f, 1.f};
-    auto left = texcoords.left;
-    auto right = texcoords.right;
+    if (fsr) {
+        // FSR has already applied the crop, so we just want to render the image
+        // it has produced.
+        left = 0;
+        top = 0;
+        right = 1;
+        bottom = 1;
+    } else {
+        // Get the normalized crop rectangle.
+        const auto crop = NormalizeCrop(framebuffer, screen_info);

-    switch (framebuffer_transform_flags) {
-    case Service::android::BufferTransformFlags::Unset:
-        break;
-    case Service::android::BufferTransformFlags::FlipV:
-        // Flip the framebuffer vertically
-        left = texcoords.right;
-        right = texcoords.left;
-        break;
-    default:
-        UNIMPLEMENTED_MSG("Unsupported framebuffer_transform_flags={}",
-                          static_cast<u32>(framebuffer_transform_flags));
-        break;
-    }
-
-    UNIMPLEMENTED_IF(framebuffer_crop_rect.left != 0);
-
-    f32 left_start{};
-    if (framebuffer_crop_rect.Top() > 0) {
-        left_start = static_cast<f32>(framebuffer_crop_rect.Top()) /
-                     static_cast<f32>(framebuffer_crop_rect.Bottom());
-    }
-    f32 scale_u = static_cast<f32>(framebuffer.width) / static_cast<f32>(screen_info.width);
-    f32 scale_v = static_cast<f32>(framebuffer.height) / static_cast<f32>(screen_info.height);
-    // Scale the output by the crop width/height. This is commonly used with 1280x720 rendering
-    // (e.g. handheld mode) on a 1920x1080 framebuffer.
-    if (!fsr) {
-        if (framebuffer_crop_rect.GetWidth() > 0) {
-            scale_u = static_cast<f32>(framebuffer_crop_rect.GetWidth()) /
-                      static_cast<f32>(screen_info.width);
-        }
-        if (framebuffer_crop_rect.GetHeight() > 0) {
-            scale_v = static_cast<f32>(framebuffer_crop_rect.GetHeight()) /
-                      static_cast<f32>(screen_info.height);
-        }
+        // Apply the crop.
+        left = crop.left;
+        top = crop.top;
+        right = crop.right;
+        bottom = crop.bottom;
     }

+    // Map the coordinates to the screen.
     const auto& screen = layout.screen;
     const auto x = static_cast<f32>(screen.left);
     const auto y = static_cast<f32>(screen.top);
     const auto w = static_cast<f32>(screen.GetWidth());
     const auto h = static_cast<f32>(screen.GetHeight());
-    data.vertices[0] = ScreenRectVertex(x, y, texcoords.top * scale_u, left_start + left * scale_v);
-    data.vertices[1] =
-        ScreenRectVertex(x + w, y, texcoords.bottom * scale_u, left_start + left * scale_v);
-    data.vertices[2] =
-        ScreenRectVertex(x, y + h, texcoords.top * scale_u, left_start + right * scale_v);
-    data.vertices[3] =
-        ScreenRectVertex(x + w, y + h, texcoords.bottom * scale_u, left_start + right * scale_v);
+    data.vertices[0] = ScreenRectVertex(x, y, left, top);
+    data.vertices[1] = ScreenRectVertex(x + w, y, right, top);
+    data.vertices[2] = ScreenRectVertex(x, y + h, left, bottom);
+    data.vertices[3] = ScreenRectVertex(x + w, y + h, right, bottom);
 }

 void BlitScreen::CreateSMAA(VkExtent2D smaa_size) {
@@ -34,7 +34,7 @@ FSR::FSR(const Device& device_, MemoryAllocator& memory_allocator_, size_t image
 }

 VkImageView FSR::Draw(Scheduler& scheduler, size_t image_index, VkImageView image_view,
-                      VkExtent2D input_image_extent, const Common::Rectangle<int>& crop_rect) {
+                      VkExtent2D input_image_extent, const Common::Rectangle<f32>& crop_rect) {

     UpdateDescriptorSet(image_index, image_view);

@@ -61,15 +61,21 @@ VkImageView FSR::Draw(Scheduler& scheduler, size_t image_index, VkImageView imag

         cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *easu_pipeline);

-        std::array<u32, 4 * 4> push_constants;
-        FsrEasuConOffset(
-            push_constants.data() + 0, push_constants.data() + 4, push_constants.data() + 8,
-            push_constants.data() + 12,
+        const f32 input_image_width = static_cast<f32>(input_image_extent.width);
+        const f32 input_image_height = static_cast<f32>(input_image_extent.height);
+        const f32 output_image_width = static_cast<f32>(output_size.width);
+        const f32 output_image_height = static_cast<f32>(output_size.height);
+        const f32 viewport_width = (crop_rect.right - crop_rect.left) * input_image_width;
+        const f32 viewport_x = crop_rect.left * input_image_width;
+        const f32 viewport_height = (crop_rect.bottom - crop_rect.top) * input_image_height;
+        const f32 viewport_y = crop_rect.top * input_image_height;

-            static_cast<f32>(crop_rect.GetWidth()), static_cast<f32>(crop_rect.GetHeight()),
-            static_cast<f32>(input_image_extent.width), static_cast<f32>(input_image_extent.height),
-            static_cast<f32>(output_size.width), static_cast<f32>(output_size.height),
-            static_cast<f32>(crop_rect.left), static_cast<f32>(crop_rect.top));
+        std::array<u32, 4 * 4> push_constants;
+        FsrEasuConOffset(push_constants.data() + 0, push_constants.data() + 4,
+                         push_constants.data() + 8, push_constants.data() + 12,
+                         viewport_width, viewport_height, input_image_width, input_image_height,
+                         output_image_width, output_image_height, viewport_x, viewport_y);
         cmdbuf.PushConstants(*pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, push_constants);

         {
@@ -17,7 +17,7 @@ public:
     explicit FSR(const Device& device, MemoryAllocator& memory_allocator, size_t image_count,
                  VkExtent2D output_size);
     VkImageView Draw(Scheduler& scheduler, size_t image_index, VkImageView image_view,
-                     VkExtent2D input_image_extent, const Common::Rectangle<int>& crop_rect);
+                     VkExtent2D input_image_extent, const Common::Rectangle<f32>& crop_rect);

 private:
     void CreateDescriptorPool();
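Since the crop rectangle handed to `FSR::Draw` is now normalized to [0, 1] by `NormalizeCrop`, the EASU viewport constants are recovered by scaling the crop back by the input image size. A worked example with assumed numbers (a 1280x720 crop at the top-left of a 1920x1080 input; no resolution-scaler factor applied):

```cpp
// Illustrative arithmetic only; mirrors the viewport computation in FSR::Draw above.
constexpr float input_image_width = 1920.0f;
constexpr float input_image_height = 1080.0f;
// NormalizeCrop result for a 1280x720 crop at the top-left corner:
constexpr float crop_left = 0.0f;
constexpr float crop_top = 0.0f;
constexpr float crop_right = 1280.0f / 1920.0f;  // ~0.667
constexpr float crop_bottom = 720.0f / 1080.0f;  // ~0.667

constexpr float viewport_x = crop_left * input_image_width;                      // 0
constexpr float viewport_y = crop_top * input_image_height;                      // 0
constexpr float viewport_width = (crop_right - crop_left) * input_image_width;   // 1280
constexpr float viewport_height = (crop_bottom - crop_top) * input_image_height; // 720
```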
|
|||||||
@@ -82,7 +82,7 @@ VkViewport GetViewportState(const Device& device, const Maxwell& regs, size_t in
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (y_negate) {
|
if (y_negate) {
|
||||||
y += height;
|
y += conv(static_cast<f32>(regs.surface_clip.height));
|
||||||
height = -height;
|
height = -height;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -923,9 +923,13 @@ void RasterizerVulkan::UpdateDynamicStates() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void RasterizerVulkan::HandleTransformFeedback() {
|
void RasterizerVulkan::HandleTransformFeedback() {
|
||||||
|
static std::once_flag warn_unsupported;
|
||||||
|
|
||||||
const auto& regs = maxwell3d->regs;
|
const auto& regs = maxwell3d->regs;
|
||||||
if (!device.IsExtTransformFeedbackSupported()) {
|
if (!device.IsExtTransformFeedbackSupported()) {
|
||||||
|
std::call_once(warn_unsupported, [&] {
|
||||||
LOG_ERROR(Render_Vulkan, "Transform feedbacks used but not supported");
|
LOG_ERROR(Render_Vulkan, "Transform feedbacks used but not supported");
|
||||||
|
});
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
query_cache.CounterEnable(VideoCommon::QueryType::StreamingByteCount,
|
query_cache.CounterEnable(VideoCommon::QueryType::StreamingByteCount,
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
// SPDX-FileCopyrightText: 2014 Citra Emulator Project
|
// SPDX-FileCopyrightText: 2014 Citra Emulator Project
|
||||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||||
|
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
// Text : Copyright 2022 yuzu Emulator Project
|
// Text : Copyright 2022 yuzu Emulator Project
|
||||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||||
|
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|||||||
@@ -152,7 +152,7 @@ void ConfigureInput::Initialize(InputCommon::InputSubsystem* input_subsystem,
|
|||||||
connect(player_controllers[0], &ConfigureInputPlayer::HandheldStateChanged,
|
connect(player_controllers[0], &ConfigureInputPlayer::HandheldStateChanged,
|
||||||
[this](bool is_handheld) { UpdateDockedState(is_handheld); });
|
[this](bool is_handheld) { UpdateDockedState(is_handheld); });
|
||||||
|
|
||||||
advanced = new ConfigureInputAdvanced(this);
|
advanced = new ConfigureInputAdvanced(hid_core, this);
|
||||||
ui->tabAdvanced->setLayout(new QHBoxLayout(ui->tabAdvanced));
|
ui->tabAdvanced->setLayout(new QHBoxLayout(ui->tabAdvanced));
|
||||||
ui->tabAdvanced->layout()->addWidget(advanced);
|
ui->tabAdvanced->layout()->addWidget(advanced);
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
// SPDX-FileCopyrightText: 2016 Citra Emulator Project
|
// SPDX-FileCopyrightText: 2016 Citra Emulator Project
|
||||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||||
|
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|||||||
@@ -4,11 +4,13 @@
 #include <QColorDialog>
 #include "common/settings.h"
 #include "core/core.h"
+#include "core/hid/emulated_controller.h"
+#include "core/hid/hid_core.h"
 #include "ui_configure_input_advanced.h"
 #include "yuzu/configuration/configure_input_advanced.h"

-ConfigureInputAdvanced::ConfigureInputAdvanced(QWidget* parent)
-    : QWidget(parent), ui(std::make_unique<Ui::ConfigureInputAdvanced>()) {
+ConfigureInputAdvanced::ConfigureInputAdvanced(Core::HID::HIDCore& hid_core_, QWidget* parent)
+    : QWidget(parent), ui(std::make_unique<Ui::ConfigureInputAdvanced>()), hid_core{hid_core_} {
     ui->setupUi(this);

     controllers_color_buttons = {{

@@ -123,6 +125,8 @@ void ConfigureInputAdvanced::ApplyConfiguration() {
         player.button_color_left = colors[1];
         player.body_color_right = colors[2];
         player.button_color_right = colors[3];
+
+        hid_core.GetEmulatedControllerByIndex(player_idx)->ReloadColorsFromSettings();
     }

     Settings::values.debug_pad_enabled = ui->debug_enabled->isChecked();

@@ -14,11 +14,15 @@ namespace Ui {
 class ConfigureInputAdvanced;
 }

+namespace Core::HID {
+class HIDCore;
+} // namespace Core::HID
+
 class ConfigureInputAdvanced : public QWidget {
     Q_OBJECT

 public:
-    explicit ConfigureInputAdvanced(QWidget* parent = nullptr);
+    explicit ConfigureInputAdvanced(Core::HID::HIDCore& hid_core_, QWidget* parent = nullptr);
     ~ConfigureInputAdvanced() override;

     void ApplyConfiguration();

@@ -44,4 +48,6 @@ private:

     std::array<std::array<QColor, 4>, 8> controllers_colors;
     std::array<std::array<QPushButton*, 4>, 8> controllers_color_buttons;
+
+    Core::HID::HIDCore& hid_core;
 };

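Taken together, the ConfigureInputAdvanced hunks above are plain constructor injection: the widget receives a reference to the HID core, stores it as a member, and calls back into it when the configuration is applied. A minimal sketch of that shape; HIDCore and ConfigureWidget here are simplified stand-ins, not the real yuzu classes:

#include <cstdio>

// Stand-in for Core::HID::HIDCore.
class HIDCore {
public:
    void ReloadColors() { std::printf("controller colors reloaded\n"); }
};

// Stand-in for the Qt widget: it does not own the core, it only borrows it,
// so a reference constructor parameter and a reference member are enough.
class ConfigureWidget {
public:
    explicit ConfigureWidget(HIDCore& hid_core_) : hid_core{hid_core_} {}

    void ApplyConfiguration() {
        // ...write the colors to the settings store...
        hid_core.ReloadColors(); // then tell the emulated controllers to pick them up
    }

private:
    HIDCore& hid_core;
};

int main() {
    HIDCore core;
    ConfigureWidget widget{core};
    widget.ApplyConfiguration();
}
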
@@ -1,4 +1,4 @@
 // SPDX-FileCopyrightText: 2016 Citra Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later

 #pragma once

@@ -1,4 +1,4 @@
 // SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later

 #pragma once

@@ -306,10 +306,10 @@ void ConfigureProfileManager::SetUserImage() {
         return;
     }

-    // Some games crash when the profile image is too big. Resize any image bigger than 256x256
+    // Profile image must be 256x256
     QImage image(image_path);
-    if (image.width() > 256 || image.height() > 256) {
-        image = image.scaled(256, 256, Qt::KeepAspectRatio);
+    if (image.width() != 256 || image.height() != 256) {
+        image = image.scaled(256, 256, Qt::KeepAspectRatioByExpanding, Qt::SmoothTransformation);
         if (!image.save(image_path)) {
             QMessageBox::warning(this, tr("Error resizing user image"),
                                  tr("Unable to resize image"));

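The aspect-ratio mode is the interesting part of the SetUserImage change: Qt::KeepAspectRatio scales the source to fit inside 256x256 (a non-square image ends up smaller than 256 on one axis), while Qt::KeepAspectRatioByExpanding scales it to cover 256x256. A small sketch of the difference, assuming a Qt build environment:

#include <QGuiApplication>
#include <QImage>
#include <cstdio>

int main(int argc, char* argv[]) {
    QGuiApplication app(argc, argv);

    // A non-square source, e.g. a 512x256 screenshot.
    QImage source(512, 256, QImage::Format_RGB32);
    source.fill(Qt::white);

    const QImage fit = source.scaled(256, 256, Qt::KeepAspectRatio, Qt::SmoothTransformation);
    const QImage cover = source.scaled(256, 256, Qt::KeepAspectRatioByExpanding, Qt::SmoothTransformation);

    std::printf("KeepAspectRatio:            %dx%d\n", fit.width(), fit.height());     // 256x128
    std::printf("KeepAspectRatioByExpanding: %dx%d\n", cover.width(), cover.height()); // 512x256
    return 0;
}

Note that KeepAspectRatioByExpanding still does not guarantee an exactly 256x256 result for a non-square source; the hunk above saves whatever scaled() returns.
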
@@ -1,4 +1,4 @@
 // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later

 #pragma once

@@ -1,4 +1,4 @@
 // SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later

 #pragma once

@@ -1,4 +1,4 @@
 // SPDX-FileCopyrightText: 2016 Citra Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later

 #pragma once

@@ -156,7 +156,6 @@ std::unique_ptr<TranslationMap> InitializeTranslations(QWidget* parent) {
     // Ui General
     INSERT(UISettings, select_user_on_boot, "Prompt for user on game boot", "");
     INSERT(UISettings, pause_when_in_background, "Pause emulation when in background", "");
-    INSERT(UISettings, confirm_before_closing, "Confirm exit while emulation is running", "");
     INSERT(UISettings, confirm_before_stopping, "Confirm before stopping emulation", "");
     INSERT(UISettings, hide_mouse, "Hide mouse on inactivity", "");
     INSERT(UISettings, controller_applet_disabled, "Disable controller applet", "");

@@ -1908,7 +1908,10 @@ void GMainWindow::ConfigureFilesystemProvider(const std::string& filepath) {
 void GMainWindow::BootGame(const QString& filename, u64 program_id, std::size_t program_index,
                            StartGameType type, AmLaunchType launch_type) {
     LOG_INFO(Frontend, "yuzu starting...");

+    if (program_id > static_cast<u64>(Service::AM::Applets::AppletProgramId::MaxProgramId)) {
         StoreRecentFile(filename); // Put the filename on top of the list
+    }

     // Save configurations
     UpdateUISettings();

@@ -2174,6 +2177,7 @@ void GMainWindow::ShutdownGame() {
         return;
     }

+    play_time_manager->Stop();
     OnShutdownBegin();
     OnEmulationStopTimeExpired();
     OnEmulationStopped();

@@ -2737,7 +2741,7 @@ void GMainWindow::OnGameListDumpRomFS(u64 program_id, const std::string& game_pa
         return;
     }

-    const auto extracted = FileSys::ExtractRomFS(romfs, FileSys::RomFSExtractionType::Full);
+    const auto extracted = FileSys::ExtractRomFS(romfs);
     if (extracted == nullptr) {
         failed();
         return;

@@ -3484,7 +3488,7 @@ void GMainWindow::OnExecuteProgram(std::size_t program_index) {
 }

 void GMainWindow::OnExit() {
-    OnStopGame();
+    ShutdownGame();
 }

 void GMainWindow::OnSaveConfig() {

@@ -4272,7 +4276,7 @@ void GMainWindow::OnToggleStatusBar() {
 }

 void GMainWindow::OnAlbum() {
-    constexpr u64 AlbumId = 0x010000000000100Dull;
+    constexpr u64 AlbumId = static_cast<u64>(Service::AM::Applets::AppletProgramId::PhotoViewer);
     auto bis_system = system->GetFileSystemController().GetSystemNANDContents();
     if (!bis_system) {
         QMessageBox::warning(this, tr("No firmware available"),

@@ -4295,7 +4299,7 @@ void GMainWindow::OnAlbum() {
 }

 void GMainWindow::OnCabinet(Service::NFP::CabinetMode mode) {
-    constexpr u64 CabinetId = 0x0100000000001002ull;
+    constexpr u64 CabinetId = static_cast<u64>(Service::AM::Applets::AppletProgramId::Cabinet);
     auto bis_system = system->GetFileSystemController().GetSystemNANDContents();
     if (!bis_system) {
         QMessageBox::warning(this, tr("No firmware available"),

@@ -4319,7 +4323,7 @@ void GMainWindow::OnCabinet(Service::NFP::CabinetMode mode) {
 }

 void GMainWindow::OnMiiEdit() {
-    constexpr u64 MiiEditId = 0x0100000000001009ull;
+    constexpr u64 MiiEditId = static_cast<u64>(Service::AM::Applets::AppletProgramId::MiiEdit);
     auto bis_system = system->GetFileSystemController().GetSystemNANDContents();
     if (!bis_system) {
         QMessageBox::warning(this, tr("No firmware available"),

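The three hunks above replace hard-coded 64-bit title IDs with named enum values. A minimal sketch of the pattern, with the constants copied from the lines being removed (the real Service::AM::Applets::AppletProgramId enum has more members, and LaunchSystemApplet is a placeholder for the NAND lookup done in the surrounding code):

#include <cstdint>
#include <cstdio>

using u64 = std::uint64_t;

// Values copied from the constants this diff replaces; the real enum has more members.
enum class AppletProgramId : u64 {
    Cabinet = 0x0100000000001002ULL,
    MiiEdit = 0x0100000000001009ULL,
    PhotoViewer = 0x010000000000100DULL,
};

// Hypothetical stand-in for launching the applet by title ID.
void LaunchSystemApplet(u64 title_id) {
    std::printf("launching applet title 0x%016llX\n", static_cast<unsigned long long>(title_id));
}

int main() {
    // The named constant documents intent; static_cast recovers the raw ID where needed.
    LaunchSystemApplet(static_cast<u64>(AppletProgramId::PhotoViewer));
    LaunchSystemApplet(static_cast<u64>(AppletProgramId::Cabinet));
    LaunchSystemApplet(static_cast<u64>(AppletProgramId::MiiEdit));
}
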
@@ -4847,7 +4851,12 @@ bool GMainWindow::SelectRomFSDumpTarget(const FileSys::ContentProvider& installe
 }

 bool GMainWindow::ConfirmClose() {
-    if (emu_thread == nullptr || !UISettings::values.confirm_before_closing) {
+    if (emu_thread == nullptr ||
+        UISettings::values.confirm_before_stopping.GetValue() == ConfirmStop::Ask_Never) {
+        return true;
+    }
+    if (!system->GetExitLocked() &&
+        UISettings::values.confirm_before_stopping.GetValue() == ConfirmStop::Ask_Based_On_Game) {
         return true;
     }
     const auto text = tr("Are you sure you want to close yuzu?");

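ConfirmClose() now keys off the three-way confirm_before_stopping setting instead of the removed confirm_before_closing boolean. A stand-alone sketch of the decision it implements; Ask_Never and Ask_Based_On_Game appear in the hunk, while Ask_Always is assumed to be the remaining state:

#include <cstdio>

enum class ConfirmStop {
    Ask_Always,        // assumed third state: always prompt
    Ask_Based_On_Game, // prompt only while the running game has exit locked
    Ask_Never,         // never prompt
};

// Returns true when the window may close without asking the user.
bool CanCloseWithoutPrompt(bool emulation_running, bool exit_locked, ConfirmStop setting) {
    if (!emulation_running || setting == ConfirmStop::Ask_Never) {
        return true;
    }
    if (!exit_locked && setting == ConfirmStop::Ask_Based_On_Game) {
        return true;
    }
    return false; // fall through to the "Are you sure you want to close yuzu?" dialog
}

int main() {
    std::printf("%d\n", CanCloseWithoutPrompt(true, false, ConfirmStop::Ask_Based_On_Game)); // 1
    std::printf("%d\n", CanCloseWithoutPrompt(true, true, ConfirmStop::Ask_Based_On_Game));  // 0
    std::printf("%d\n", CanCloseWithoutPrompt(true, true, ConfirmStop::Ask_Never));          // 1
}
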
@@ -4952,7 +4961,7 @@ bool GMainWindow::ConfirmChangeGame() {
 }

 bool GMainWindow::ConfirmForceLockedExit() {
-    if (emu_thread == nullptr || !UISettings::values.confirm_before_closing) {
+    if (emu_thread == nullptr) {
         return true;
     }
     const auto text = tr("The currently running application has requested yuzu to not exit.\n\n"

Some files were not shown because too many files have changed in this diff.